From 326d9945f380b565aca416ff215dce14c30010b9 Mon Sep 17 00:00:00 2001 From: Joao Marcal Date: Fri, 3 Nov 2023 17:40:00 +0000 Subject: [PATCH 01/38] GCP implementation using thanos.io/objstore --- .../chunk/client/gcp/gcs_object_client.go | 2 + .../client/gcp/gcs_thanos_object_client.go | 160 ++++++++++++++++++ pkg/storage/factory.go | 4 + 3 files changed, 166 insertions(+) create mode 100644 pkg/storage/chunk/client/gcp/gcs_thanos_object_client.go diff --git a/pkg/storage/chunk/client/gcp/gcs_object_client.go b/pkg/storage/chunk/client/gcp/gcs_object_client.go index d4b35e48d9df5..5677ce4175f6f 100644 --- a/pkg/storage/chunk/client/gcp/gcs_object_client.go +++ b/pkg/storage/chunk/client/gcp/gcs_object_client.go @@ -19,6 +19,7 @@ import ( google_http "google.golang.org/api/transport/http" amnet "k8s.io/apimachinery/pkg/util/net" + "github.com/grafana/loki/pkg/storage/bucket" "github.com/grafana/loki/pkg/storage/chunk/client" "github.com/grafana/loki/pkg/storage/chunk/client/hedging" "github.com/grafana/loki/pkg/storage/chunk/client/util" @@ -35,6 +36,7 @@ type GCSObjectClient struct { // GCSConfig is config for the GCS Chunk Client. type GCSConfig struct { + bCfg bucket.Config `yaml:"bucket_config"` BucketName string `yaml:"bucket_name"` ServiceAccount flagext.Secret `yaml:"service_account"` ChunkBufferSize int `yaml:"chunk_buffer_size"` diff --git a/pkg/storage/chunk/client/gcp/gcs_thanos_object_client.go b/pkg/storage/chunk/client/gcp/gcs_thanos_object_client.go new file mode 100644 index 0000000000000..5060580083d9b --- /dev/null +++ b/pkg/storage/chunk/client/gcp/gcs_thanos_object_client.go @@ -0,0 +1,160 @@ +package gcp + +import ( + "context" + "io" + "net" + "net/http" + + "cloud.google.com/go/storage" + "github.com/go-kit/log" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/thanos-io/objstore" + "google.golang.org/api/googleapi" + amnet "k8s.io/apimachinery/pkg/util/net" + + "github.com/grafana/loki/pkg/storage/bucket" + "github.com/grafana/loki/pkg/storage/chunk/client" + "github.com/grafana/loki/pkg/storage/chunk/client/hedging" +) + +type GCSThanosObjectClient struct { + cfg GCSConfig + client objstore.Bucket +} + +func NewGCSThanosObjectClient(ctx context.Context, cfg GCSConfig, hedgingCfg hedging.Config) (*GCSThanosObjectClient, error) { + return newGCSThanosObjectClient(ctx, cfg, hedgingCfg, storage.NewClient) +} + +func newGCSThanosObjectClient(ctx context.Context, cfg GCSConfig, hedgingCfg hedging.Config, clientFactory ClientFactory) (*GCSThanosObjectClient, error) { + bucket, err := bucket.NewClient(ctx, cfg.bCfg, cfg.BucketName, log.NewNopLogger(), prometheus.DefaultRegisterer) + if err != nil { + return nil, err + } + return &GCSThanosObjectClient{ + cfg: cfg, + client: bucket, + }, nil +} + +func (s *GCSThanosObjectClient) Stop() { +} + +func (s *GCSThanosObjectClient) ObjectExists(ctx context.Context, objectKey string) (bool, error) { + return s.client.Exists(ctx, objectKey) +} + +// PutObject puts the specified bytes into the configured GCS bucket at the provided key +func (s *GCSThanosObjectClient) PutObject(ctx context.Context, objectKey string, object io.ReadSeeker) error { + return s.client.Upload(ctx, objectKey, object) +} + +// GetObject returns a reader and the size for the specified object key from the configured GCS bucket. 
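+// The size returned by this first implementation is 0; the TODO below tracks deriving it
+// from the object attributes instead.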
+func (s *GCSThanosObjectClient) GetObject(ctx context.Context, objectKey string) (io.ReadCloser, int64, error) { + reader, err := s.client.Get(ctx, objectKey) + if err != nil { + return nil, 0, err + } + // TODO (JoaoBraveCoding) currently returning 0 for the int64 as no + return reader, 0, err +} + +// List implements chunk.ObjectClient. +func (s *GCSThanosObjectClient) List(ctx context.Context, prefix, delimiter string) ([]client.StorageObject, []client.StorageCommonPrefix, error) { + var storageObjects []client.StorageObject + var commonPrefixes []client.StorageCommonPrefix + q := &storage.Query{Prefix: prefix, Delimiter: delimiter} + + // Using delimiter and selected attributes doesn't work well together -- it returns nothing. + // Reason is that Go's API only sets "fields=items(name,updated)" parameter in the request, + // but what we really need is "fields=prefixes,items(name,updated)". Unfortunately we cannot set that, + // so instead we don't use attributes selection when using delimiter. + if delimiter == "" { + err := q.SetAttrSelection([]string{"Name", "Updated"}) + if err != nil { + return nil, nil, err + } + } + + s.client.Iter(ctx, prefix, func(objectKey string) error { + + // // When doing query with Delimiter, Prefix is the only field set for entries which represent synthetic "directory entries". + // if attr.Prefix != "" { + // commonPrefixes = append(commonPrefixes, client.StorageCommonPrefix(attr.Prefix)) + // continue + // } + + // storageObjects = append(storageObjects, client.StorageObject{ + // Key: attr.Name, + // ModifiedAt: attr.Updated, + // }) + + + return nil + + }) + + return storageObjects, commonPrefixes, nil +} + +// DeleteObject deletes the specified object key from the configured GCS bucket. +func (s *GCSThanosObjectClient) DeleteObject(ctx context.Context, objectKey string) error { + return s.client.Delete(ctx, objectKey) +} + +// IsObjectNotFoundErr returns true if error means that object is not found. Relevant to GetObject and DeleteObject operations. +func (s *GCSThanosObjectClient) IsObjectNotFoundErr(err error) bool { + return s.client.IsObjNotFoundErr(err) +} + +// IsStorageTimeoutErr returns true if error means that object cannot be retrieved right now due to server-side timeouts. +func (s *GCSThanosObjectClient) IsStorageTimeoutErr(err error) bool { + // TODO(dannyk): move these out to be generic + // context errors are all client-side + if isContextErr(err) { + return false + } + + // connection misconfiguration, or writing on a closed connection + // do NOT retry; this is not a server-side issue + if errors.Is(err, net.ErrClosed) || amnet.IsConnectionRefused(err) { + return false + } + + // this is a server-side timeout + if isTimeoutError(err) { + return true + } + + // connection closed (closed before established) or reset (closed after established) + // this is a server-side issue + if errors.Is(err, io.EOF) || amnet.IsConnectionReset(err) { + return true + } + + if gerr, ok := err.(*googleapi.Error); ok { + // https://cloud.google.com/storage/docs/retry-strategy + return gerr.Code == http.StatusRequestTimeout || + gerr.Code == http.StatusGatewayTimeout + } + + return false +} + +// IsStorageThrottledErr returns true if error means that object cannot be retrieved right now due to throttling. 
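+// GCS signals throttling with HTTP 429 (Too Many Requests); any 5xx response is treated
+// as retryable here as well.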
+func (s *GCSThanosObjectClient) IsStorageThrottledErr(err error) bool { + if gerr, ok := err.(*googleapi.Error); ok { + // https://cloud.google.com/storage/docs/retry-strategy + return gerr.Code == http.StatusTooManyRequests || + (gerr.Code/100 == 5) // all 5xx errors are retryable + } + + return false +} + +// IsRetryableErr returns true if the request failed due to some retryable server-side scenario +func (s *GCSThanosObjectClient) IsRetryableErr(err error) bool { + return s.IsStorageTimeoutErr(err) || s.IsStorageThrottledErr(err) +} diff --git a/pkg/storage/factory.go b/pkg/storage/factory.go index da687c5ea9c7b..2ab35eb51ad62 100644 --- a/pkg/storage/factory.go +++ b/pkg/storage/factory.go @@ -332,6 +332,7 @@ type Config struct { IndexQueriesCacheConfig cache.Config `yaml:"index_queries_cache_config"` DisableBroadIndexQueries bool `yaml:"disable_broad_index_queries"` MaxParallelGetChunk int `yaml:"max_parallel_get_chunk"` + ThanosObjectStore bool `yaml:"thanos_object_store"` MaxChunkBatchSize int `yaml:"max_chunk_batch_size"` BoltDBShipperConfig boltdb.IndexCfg `yaml:"boltdb_shipper" doc:"description=Configures storing index in an Object Store (GCS/S3/Azure/Swift/COS/Filesystem) in the form of boltdb files. Required fields only required when boltdb-shipper is defined in config."` @@ -705,6 +706,9 @@ func internalNewObjectClient(name string, cfg Config, clientMetrics ClientMetric if cfg.CongestionControl.Enabled { gcsCfg.EnableRetries = false } + if cfg.ThanosObjectStore { + return gcp.NewGCSThanosObjectClient(context.Background(), gcsCfg, cfg.Hedging) + } return gcp.NewGCSObjectClient(context.Background(), gcsCfg, cfg.Hedging) case config.StorageTypeAzure: From 5960741b458857405c5eb98082b212030934121a Mon Sep 17 00:00:00 2001 From: Joao Marcal Date: Wed, 8 Nov 2023 15:47:25 +0000 Subject: [PATCH 02/38] Implemented all ObjectClient methods --- .../client/gcp/gcs_thanos_object_client.go | 49 +++---- .../gcp/gcs_thanos_object_client_test.go | 122 ++++++++++++++++++ pkg/storage/factory.go | 15 ++- 3 files changed, 155 insertions(+), 31 deletions(-) create mode 100644 pkg/storage/chunk/client/gcp/gcs_thanos_object_client_test.go diff --git a/pkg/storage/chunk/client/gcp/gcs_thanos_object_client.go b/pkg/storage/chunk/client/gcp/gcs_thanos_object_client.go index 5060580083d9b..4e04da51d8eb5 100644 --- a/pkg/storage/chunk/client/gcp/gcs_thanos_object_client.go +++ b/pkg/storage/chunk/client/gcp/gcs_thanos_object_client.go @@ -5,8 +5,8 @@ import ( "io" "net" "net/http" + "strings" - "cloud.google.com/go/storage" "github.com/go-kit/log" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" @@ -20,21 +20,20 @@ import ( ) type GCSThanosObjectClient struct { - cfg GCSConfig client objstore.Bucket } -func NewGCSThanosObjectClient(ctx context.Context, cfg GCSConfig, hedgingCfg hedging.Config) (*GCSThanosObjectClient, error) { - return newGCSThanosObjectClient(ctx, cfg, hedgingCfg, storage.NewClient) +func NewGCSThanosObjectClient(ctx context.Context, cfg bucket.Config, logger log.Logger, hedgingCfg hedging.Config) (*GCSThanosObjectClient, error) { + return newGCSThanosObjectClient(ctx, cfg, logger) } -func newGCSThanosObjectClient(ctx context.Context, cfg GCSConfig, hedgingCfg hedging.Config, clientFactory ClientFactory) (*GCSThanosObjectClient, error) { - bucket, err := bucket.NewClient(ctx, cfg.bCfg, cfg.BucketName, log.NewNopLogger(), prometheus.DefaultRegisterer) +func newGCSThanosObjectClient(ctx context.Context, cfg bucket.Config, logger log.Logger) 
(*GCSThanosObjectClient, error) { + // TODO (JoaoBraveCoding) accessing cfg.GCS directly + bucket, err := bucket.NewClient(ctx, cfg, cfg.GCS.BucketName, logger, prometheus.DefaultRegisterer) if err != nil { return nil, err } return &GCSThanosObjectClient{ - cfg: cfg, client: bucket, }, nil } @@ -65,36 +64,30 @@ func (s *GCSThanosObjectClient) GetObject(ctx context.Context, objectKey string) func (s *GCSThanosObjectClient) List(ctx context.Context, prefix, delimiter string) ([]client.StorageObject, []client.StorageCommonPrefix, error) { var storageObjects []client.StorageObject var commonPrefixes []client.StorageCommonPrefix - q := &storage.Query{Prefix: prefix, Delimiter: delimiter} + iterParams := objstore.IterOption(func(params *objstore.IterParams) {}) - // Using delimiter and selected attributes doesn't work well together -- it returns nothing. - // Reason is that Go's API only sets "fields=items(name,updated)" parameter in the request, - // but what we really need is "fields=prefixes,items(name,updated)". Unfortunately we cannot set that, - // so instead we don't use attributes selection when using delimiter. if delimiter == "" { - err := q.SetAttrSelection([]string{"Name", "Updated"}) - if err != nil { - return nil, nil, err - } + iterParams = objstore.WithRecursiveIter } s.client.Iter(ctx, prefix, func(objectKey string) error { + if delimiter != "" && strings.HasSuffix(objectKey, delimiter) { + commonPrefixes = append(commonPrefixes, client.StorageCommonPrefix(objectKey)) + return nil + } + attr, err := s.client.Attributes(ctx, objectKey) + if err != nil { + return errors.Wrapf(err, "failed to get attributes for %s", objectKey) + } - // // When doing query with Delimiter, Prefix is the only field set for entries which represent synthetic "directory entries". 
- // if attr.Prefix != "" { - // commonPrefixes = append(commonPrefixes, client.StorageCommonPrefix(attr.Prefix)) - // continue - // } - - // storageObjects = append(storageObjects, client.StorageObject{ - // Key: attr.Name, - // ModifiedAt: attr.Updated, - // }) - + storageObjects = append(storageObjects, client.StorageObject{ + Key: objectKey, + ModifiedAt: attr.LastModified, + }) return nil - }) + }, iterParams) return storageObjects, commonPrefixes, nil } diff --git a/pkg/storage/chunk/client/gcp/gcs_thanos_object_client_test.go b/pkg/storage/chunk/client/gcp/gcs_thanos_object_client_test.go new file mode 100644 index 0000000000000..0fcb35fe379ac --- /dev/null +++ b/pkg/storage/chunk/client/gcp/gcs_thanos_object_client_test.go @@ -0,0 +1,122 @@ +package gcp + +import ( + "bytes" + "context" + "sort" + "testing" + + "github.com/grafana/loki/pkg/storage/bucket/filesystem" + "github.com/grafana/loki/pkg/storage/chunk/client" + "github.com/stretchr/testify/require" +) + +func TestGCSThanosObjStore_List(t *testing.T) { + tests := []struct { + name string + prefix string + delimiter string + storageObjKeys []string + storageCommonPref []client.StorageCommonPrefix + wantErr error + }{ + { + "list_top_level_only", + "", + "/", + []string{"top-level-file-1", "top-level-file-2"}, + []client.StorageCommonPrefix{"dir-1/", "dir-2/", "depply/"}, + nil, + }, + { + "list_all_dir_1", + "dir-1", + "", + []string{"dir-1/file-1", "dir-1/file-2"}, + nil, + nil, + }, + { + "list_recursive", + "", + "", + []string{ + "top-level-file-1", + "top-level-file-2", + "dir-1/file-1", + "dir-1/file-2", + "dir-2/file-3", + "dir-2/file-4", + "dir-2/file-5", + "depply/nested/folder/a", + "depply/nested/folder/b", + "depply/nested/folder/c", + }, + nil, + nil, + }, + { + "unknown_prefix", + "test", + "", + []string{}, + nil, + nil, + }, + { + "only_storage_common_prefix", + "depply/", + "/", + []string{}, + []client.StorageCommonPrefix{ + "depply/nested/", + }, + nil, + }, + } + + for _, tt := range tests { + config := filesystem.Config{ + Directory: t.TempDir(), + } + newBucket, err := filesystem.NewBucketClient(config) + require.NoError(t, err) + + buff := bytes.NewBufferString("foo") + require.NoError(t, newBucket.Upload(context.Background(), "top-level-file-1", buff)) + require.NoError(t, newBucket.Upload(context.Background(), "top-level-file-2", buff)) + require.NoError(t, newBucket.Upload(context.Background(), "dir-1/file-1", buff)) + require.NoError(t, newBucket.Upload(context.Background(), "dir-1/file-2", buff)) + require.NoError(t, newBucket.Upload(context.Background(), "dir-2/file-3", buff)) + require.NoError(t, newBucket.Upload(context.Background(), "dir-2/file-4", buff)) + require.NoError(t, newBucket.Upload(context.Background(), "dir-2/file-5", buff)) + require.NoError(t, newBucket.Upload(context.Background(), "depply/nested/folder/a", buff)) + require.NoError(t, newBucket.Upload(context.Background(), "depply/nested/folder/b", buff)) + require.NoError(t, newBucket.Upload(context.Background(), "depply/nested/folder/c", buff)) + + gcpClient := &GCSThanosObjectClient{} + gcpClient.client = newBucket + + storageObj, storageCommonPref, err := gcpClient.List(context.Background(), tt.prefix, tt.delimiter) + if tt.wantErr != nil { + require.Equal(t, tt.wantErr.Error(), err.Error()) + continue + } + + keys := []string{} + for _, key := range storageObj { + keys = append(keys, key.Key) + } + + sort.Slice(tt.storageObjKeys, func(i, j int) bool { + return tt.storageObjKeys[i] < tt.storageObjKeys[j] + }) + 
sort.Slice(tt.storageCommonPref, func(i, j int) bool { + return tt.storageCommonPref[i] < tt.storageCommonPref[j] + }) + + require.NoError(t, err) + require.Equal(t, tt.storageObjKeys, keys) + require.Equal(t, tt.storageCommonPref, storageCommonPref) + } +} diff --git a/pkg/storage/factory.go b/pkg/storage/factory.go index 2ab35eb51ad62..a78366062912a 100644 --- a/pkg/storage/factory.go +++ b/pkg/storage/factory.go @@ -14,6 +14,7 @@ import ( "github.com/grafana/dskit/flagext" + "github.com/grafana/loki/pkg/storage/bucket" "github.com/grafana/loki/pkg/storage/chunk/cache" "github.com/grafana/loki/pkg/storage/chunk/client" "github.com/grafana/loki/pkg/storage/chunk/client/alibaba" @@ -40,6 +41,7 @@ import ( "github.com/grafana/loki/pkg/storage/stores/shipper/indexshipper/indexgateway" "github.com/grafana/loki/pkg/util" "github.com/grafana/loki/pkg/util/constants" + utilLog "github.com/grafana/loki/pkg/util/log" ) var ( @@ -332,7 +334,9 @@ type Config struct { IndexQueriesCacheConfig cache.Config `yaml:"index_queries_cache_config"` DisableBroadIndexQueries bool `yaml:"disable_broad_index_queries"` MaxParallelGetChunk int `yaml:"max_parallel_get_chunk"` - ThanosObjectStore bool `yaml:"thanos_object_store"` + + ThanosObjStore bool `yaml:"thanos_objstore"` + ObjStoreConf bucket.Config `yaml:"objstore_config"` MaxChunkBatchSize int `yaml:"max_chunk_batch_size"` BoltDBShipperConfig boltdb.IndexCfg `yaml:"boltdb_shipper" doc:"description=Configures storing index in an Object Store (GCS/S3/Azure/Swift/COS/Filesystem) in the form of boltdb files. Required fields only required when boltdb-shipper is defined in config."` @@ -361,6 +365,8 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { cfg.Hedging.RegisterFlagsWithPrefix("store.", f) cfg.CongestionControl.RegisterFlagsWithPrefix("store.", f) + cfg.ObjStoreConf.RegisterFlags(f) + cfg.IndexQueriesCacheConfig.RegisterFlagsWithPrefix("store.index-cache-read.", "", f) f.DurationVar(&cfg.IndexCacheValidity, "store.index-cache-validity", 5*time.Minute, "Cache validity for active index entries. Should be no higher than -ingester.max-chunk-idle.") f.StringVar(&cfg.ObjectPrefix, "store.object-prefix", "", "The prefix to all keys inserted in object storage. 
Example: loki-instances/west/") @@ -398,6 +404,9 @@ func (cfg *Config) Validate() error { if err := cfg.BloomShipperConfig.Validate(); err != nil { return errors.Wrap(err, "invalid bloom shipper config") } + if err := cfg.ObjStoreConf.Validate(); err != nil { + return err + } return cfg.NamedStores.Validate() } @@ -706,8 +715,8 @@ func internalNewObjectClient(name string, cfg Config, clientMetrics ClientMetric if cfg.CongestionControl.Enabled { gcsCfg.EnableRetries = false } - if cfg.ThanosObjectStore { - return gcp.NewGCSThanosObjectClient(context.Background(), gcsCfg, cfg.Hedging) + if cfg.ThanosObjStore { + return gcp.NewGCSThanosObjectClient(context.Background(), cfg.ObjStoreConf, utilLog.Logger, cfg.Hedging) } return gcp.NewGCSObjectClient(context.Background(), gcsCfg, cfg.Hedging) From 9878c9715083227787f6bb66b936c3da82755608 Mon Sep 17 00:00:00 2001 From: Joao Marcal Date: Thu, 9 Nov 2023 09:34:15 +0000 Subject: [PATCH 03/38] Cleaned up code --- .../chunk/client/gcp/gcs_object_client.go | 2 -- .../client/gcp/gcs_thanos_object_client.go | 27 ++++++++++++------- pkg/storage/factory.go | 5 +++- 3 files changed, 21 insertions(+), 13 deletions(-) diff --git a/pkg/storage/chunk/client/gcp/gcs_object_client.go b/pkg/storage/chunk/client/gcp/gcs_object_client.go index 5677ce4175f6f..d4b35e48d9df5 100644 --- a/pkg/storage/chunk/client/gcp/gcs_object_client.go +++ b/pkg/storage/chunk/client/gcp/gcs_object_client.go @@ -19,7 +19,6 @@ import ( google_http "google.golang.org/api/transport/http" amnet "k8s.io/apimachinery/pkg/util/net" - "github.com/grafana/loki/pkg/storage/bucket" "github.com/grafana/loki/pkg/storage/chunk/client" "github.com/grafana/loki/pkg/storage/chunk/client/hedging" "github.com/grafana/loki/pkg/storage/chunk/client/util" @@ -36,7 +35,6 @@ type GCSObjectClient struct { // GCSConfig is config for the GCS Chunk Client. 
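 // Fields map one-to-one onto the gcs block of Loki's storage_config.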
type GCSConfig struct { - bCfg bucket.Config `yaml:"bucket_config"` BucketName string `yaml:"bucket_name"` ServiceAccount flagext.Secret `yaml:"service_account"` ChunkBufferSize int `yaml:"chunk_buffer_size"` diff --git a/pkg/storage/chunk/client/gcp/gcs_thanos_object_client.go b/pkg/storage/chunk/client/gcp/gcs_thanos_object_client.go index 4e04da51d8eb5..cc2294f31298f 100644 --- a/pkg/storage/chunk/client/gcp/gcs_thanos_object_client.go +++ b/pkg/storage/chunk/client/gcp/gcs_thanos_object_client.go @@ -23,13 +23,12 @@ type GCSThanosObjectClient struct { client objstore.Bucket } -func NewGCSThanosObjectClient(ctx context.Context, cfg bucket.Config, logger log.Logger, hedgingCfg hedging.Config) (*GCSThanosObjectClient, error) { - return newGCSThanosObjectClient(ctx, cfg, logger) +func NewGCSThanosObjectClient(ctx context.Context, cfg bucket.Config, component string, logger log.Logger, hedgingCfg hedging.Config) (*GCSThanosObjectClient, error) { + return newGCSThanosObjectClient(ctx, cfg, component, logger) } -func newGCSThanosObjectClient(ctx context.Context, cfg bucket.Config, logger log.Logger) (*GCSThanosObjectClient, error) { - // TODO (JoaoBraveCoding) accessing cfg.GCS directly - bucket, err := bucket.NewClient(ctx, cfg, cfg.GCS.BucketName, logger, prometheus.DefaultRegisterer) +func newGCSThanosObjectClient(ctx context.Context, cfg bucket.Config, component string, logger log.Logger) (*GCSThanosObjectClient, error) { + bucket, err := bucket.NewClient(ctx, cfg, component, logger, prometheus.DefaultRegisterer) if err != nil { return nil, err } @@ -38,9 +37,9 @@ func newGCSThanosObjectClient(ctx context.Context, cfg bucket.Config, logger log }, nil } -func (s *GCSThanosObjectClient) Stop() { -} +func (s *GCSThanosObjectClient) Stop() {} +// ObjectExists checks if a given objectKey exists in the GCS bucket func (s *GCSThanosObjectClient) ObjectExists(ctx context.Context, objectKey string) (bool, error) { return s.client.Exists(ctx, objectKey) } @@ -56,21 +55,29 @@ func (s *GCSThanosObjectClient) GetObject(ctx context.Context, objectKey string) if err != nil { return nil, 0, err } - // TODO (JoaoBraveCoding) currently returning 0 for the int64 as no - return reader, 0, err + + attr, err := s.client.Attributes(ctx, objectKey) + if err != nil { + return nil, 0, errors.Wrapf(err, "failed to get attributes for %s", objectKey) + } + + return reader, attr.Size, err } -// List implements chunk.ObjectClient. +// List objects with given prefix. 
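+// An empty delimiter lists recursively; with a non-empty delimiter, keys ending in the
+// delimiter are returned as common prefixes rather than objects.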
func (s *GCSThanosObjectClient) List(ctx context.Context, prefix, delimiter string) ([]client.StorageObject, []client.StorageCommonPrefix, error) { var storageObjects []client.StorageObject var commonPrefixes []client.StorageCommonPrefix iterParams := objstore.IterOption(func(params *objstore.IterParams) {}) + // If delimiter is empty we want to list all files if delimiter == "" { iterParams = objstore.WithRecursiveIter } s.client.Iter(ctx, prefix, func(objectKey string) error { + // CommonPrefixes are keys that have the prefix and have the delimiter + // as a suffix if delimiter != "" && strings.HasSuffix(objectKey, delimiter) { commonPrefixes = append(commonPrefixes, client.StorageCommonPrefix(objectKey)) return nil diff --git a/pkg/storage/factory.go b/pkg/storage/factory.go index a78366062912a..7f736eff7d60f 100644 --- a/pkg/storage/factory.go +++ b/pkg/storage/factory.go @@ -716,7 +716,10 @@ func internalNewObjectClient(name string, cfg Config, clientMetrics ClientMetric gcsCfg.EnableRetries = false } if cfg.ThanosObjStore { - return gcp.NewGCSThanosObjectClient(context.Background(), cfg.ObjStoreConf, utilLog.Logger, cfg.Hedging) + // Passing "gcs" as the component name as currently it's not + // possible to get the component called this method + // TODO(JoaoBraveCoding) update compoent when bigger refactor happens + return gcp.NewGCSThanosObjectClient(context.Background(), cfg.ObjStoreConf, "gcs", utilLog.Logger, cfg.Hedging) } return gcp.NewGCSObjectClient(context.Background(), gcsCfg, cfg.Hedging) From f6ea86d83e9ee2b934378adebc6b21061bce6de0 Mon Sep 17 00:00:00 2001 From: Joao Marcal Date: Thu, 9 Nov 2023 10:07:21 +0000 Subject: [PATCH 04/38] Change Iter options to be more standard --- pkg/storage/chunk/client/gcp/gcs_thanos_object_client.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/storage/chunk/client/gcp/gcs_thanos_object_client.go b/pkg/storage/chunk/client/gcp/gcs_thanos_object_client.go index cc2294f31298f..a2fe73050ce07 100644 --- a/pkg/storage/chunk/client/gcp/gcs_thanos_object_client.go +++ b/pkg/storage/chunk/client/gcp/gcs_thanos_object_client.go @@ -68,11 +68,11 @@ func (s *GCSThanosObjectClient) GetObject(ctx context.Context, objectKey string) func (s *GCSThanosObjectClient) List(ctx context.Context, prefix, delimiter string) ([]client.StorageObject, []client.StorageCommonPrefix, error) { var storageObjects []client.StorageObject var commonPrefixes []client.StorageCommonPrefix - iterParams := objstore.IterOption(func(params *objstore.IterParams) {}) + var iterParams []objstore.IterOption // If delimiter is empty we want to list all files if delimiter == "" { - iterParams = objstore.WithRecursiveIter + iterParams = append(iterParams, objstore.WithRecursiveIter) } s.client.Iter(ctx, prefix, func(objectKey string) error { @@ -94,7 +94,7 @@ func (s *GCSThanosObjectClient) List(ctx context.Context, prefix, delimiter stri return nil - }, iterParams) + }, iterParams...) 
return storageObjects, commonPrefixes, nil } From 0c88216cf4564c25911fc8163c78977ad9dd501c Mon Sep 17 00:00:00 2001 From: Joao Marcal Date: Thu, 9 Nov 2023 17:15:54 +0000 Subject: [PATCH 05/38] Allow support for ruler to use thanos/obj storage Signed-off-by: Joao Marcal --- pkg/loki/modules.go | 7 ++- pkg/ruler/base/ruler_test.go | 2 +- pkg/ruler/base/storage.go | 10 ++- pkg/storage/bucket/client.go | 83 ++++++++++++++++++++----- pkg/storage/bucket/filesystem/config.go | 8 ++- pkg/validation/limits.go | 21 +++++++ 6 files changed, 113 insertions(+), 18 deletions(-) diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go index 4c14a4872655f..9bc969dc77452 100644 --- a/pkg/loki/modules.go +++ b/pkg/loki/modules.go @@ -1036,7 +1036,12 @@ func (t *Loki) initRulerStorage() (_ services.Service, err error) { } } - t.RulerStorage, err = base_ruler.NewLegacyRuleStore(t.Cfg.Ruler.StoreConfig, t.Cfg.StorageConfig.Hedging, t.clientMetrics, ruler.GroupLoader{}, util_log.Logger) + overrides, err := validation.NewOverrides(t.Cfg.LimitsConfig, t.TenantLimits) + if err != nil { + return nil, err + } + + t.RulerStorage, err = base_ruler.NewLegacyRuleStore(t.Cfg.Ruler.StoreConfig, overrides, t.Cfg.StorageConfig.Hedging, t.clientMetrics, ruler.GroupLoader{}, util_log.Logger) return } diff --git a/pkg/ruler/base/ruler_test.go b/pkg/ruler/base/ruler_test.go index 99839ed652536..ffa554c2cf902 100644 --- a/pkg/ruler/base/ruler_test.go +++ b/pkg/ruler/base/ruler_test.go @@ -205,7 +205,7 @@ func buildRuler(t *testing.T, rulerConfig Config, q storage.Querier, clientMetri require.NoError(t, rulerConfig.Validate(log.NewNopLogger())) engine, queryable, pusher, logger, overrides, reg := testSetup(t, q) - storage, err := NewLegacyRuleStore(rulerConfig.StoreConfig, hedging.Config{}, clientMetrics, promRules.FileLoader{}, log.NewNopLogger()) + storage, err := NewLegacyRuleStore(rulerConfig.StoreConfig, nil, hedging.Config{}, clientMetrics, promRules.FileLoader{}, log.NewNopLogger()) require.NoError(t, err) managerFactory := DefaultTenantManagerFactory(rulerConfig, pusher, queryable, engine, reg, constants.Loki) diff --git a/pkg/ruler/base/storage.go b/pkg/ruler/base/storage.go index 4a79fd5691221..5c887f64ad065 100644 --- a/pkg/ruler/base/storage.go +++ b/pkg/ruler/base/storage.go @@ -27,6 +27,7 @@ import ( "github.com/grafana/loki/pkg/storage/chunk/client/hedging" "github.com/grafana/loki/pkg/storage/chunk/client/ibmcloud" "github.com/grafana/loki/pkg/storage/chunk/client/openstack" + utilLog "github.com/grafana/loki/pkg/util/log" ) // RuleStoreConfig configures a rule store. @@ -44,6 +45,9 @@ type RuleStoreConfig struct { COS ibmcloud.COSConfig `yaml:"cos" doc:"description=Configures backend rule storage for IBM Cloud Object Storage (COS)."` Local local.Config `yaml:"local" doc:"description=Configures backend rule storage for a local file system directory."` + ThanosObjStore bool `yaml:"thanos_objstore"` + ObjStoreConf rulestore.Config `yaml:"objstore_config"` + mock rulestore.RuleStore `yaml:"-"` } @@ -82,7 +86,7 @@ func (cfg *RuleStoreConfig) IsDefaults() bool { // NewLegacyRuleStore returns a rule store backend client based on the provided cfg. // The client used by the function is based a legacy object store clients that shouldn't // be used anymore. 
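 // When thanos_objstore is enabled, the store is built on the thanos-io/objstore bucket
 // client instead (see the ThanosObjStore branch added below).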
-func NewLegacyRuleStore(cfg RuleStoreConfig, hedgeCfg hedging.Config, clientMetrics storage.ClientMetrics, loader promRules.GroupLoader, logger log.Logger) (rulestore.RuleStore, error) { +func NewLegacyRuleStore(cfg RuleStoreConfig, cfgProvider bucket.TenantConfigProvider, hedgeCfg hedging.Config, clientMetrics storage.ClientMetrics, loader promRules.GroupLoader, logger log.Logger) (rulestore.RuleStore, error) { if cfg.mock != nil { return cfg.mock, nil } @@ -94,6 +98,10 @@ func NewLegacyRuleStore(cfg RuleStoreConfig, hedgeCfg hedging.Config, clientMetr var err error var client client.ObjectClient + if cfg.ThanosObjStore { + return NewRuleStore(context.Background(), cfg.ObjStoreConf, cfgProvider, loader, utilLog.Logger, prometheus.DefaultRegisterer) + } + switch cfg.Type { case "azure": client, err = azure.NewBlobStorage(&cfg.Azure, clientMetrics.AzureMetrics, hedgeCfg) diff --git a/pkg/storage/bucket/client.go b/pkg/storage/bucket/client.go index 57751afe36546..5f19b39548791 100644 --- a/pkg/storage/bucket/client.go +++ b/pkg/storage/bucket/client.go @@ -5,12 +5,13 @@ import ( "errors" "flag" "fmt" + "regexp" "strings" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/thanos-io/objstore" - opentracing "github.com/thanos-io/objstore/tracing/opentracing" + objstoretracing "github.com/thanos-io/objstore/tracing/opentracing" "github.com/grafana/loki/pkg/storage/bucket/azure" "github.com/grafana/loki/pkg/storage/bucket/filesystem" @@ -35,17 +36,22 @@ const ( // Filesystem is the value for the filesystem storage backend. Filesystem = "filesystem" + + // validPrefixCharactersRegex allows only alphanumeric characters to prevent subtle bugs and simplify validation + validPrefixCharactersRegex = `^[\da-zA-Z]+$` ) var ( SupportedBackends = []string{S3, GCS, Azure, Swift, Filesystem} - ErrUnsupportedStorageBackend = errors.New("unsupported storage backend") + ErrUnsupportedStorageBackend = errors.New("unsupported storage backend") + ErrInvalidCharactersInStoragePrefix = errors.New("storage prefix contains invalid characters, it may only contain digits and English alphabet letters") ) -// Config holds configuration for accessing long-term storage. -type Config struct { +// StorageBackendConfig holds configuration for accessing long-term storage. +type StorageBackendConfig struct { Backend string `yaml:"backend"` + // Backends S3 s3.Config `yaml:"s3"` GCS gcs.Config `yaml:"gcs"` @@ -63,26 +69,30 @@ type Config struct { } // Returns the supportedBackends for the package and any custom backends injected into the config. -func (cfg *Config) supportedBackends() []string { +func (cfg *StorageBackendConfig) supportedBackends() []string { return append(SupportedBackends, cfg.ExtraBackends...) } // RegisterFlags registers the backend storage config. 
-func (cfg *Config) RegisterFlags(f *flag.FlagSet) { +func (cfg *StorageBackendConfig) RegisterFlags(f *flag.FlagSet) { cfg.RegisterFlagsWithPrefix("", f) } -func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { +func (cfg *StorageBackendConfig) RegisterFlagsWithPrefixAndDefaultDirectory(prefix, dir string, f *flag.FlagSet) { cfg.S3.RegisterFlagsWithPrefix(prefix, f) cfg.GCS.RegisterFlagsWithPrefix(prefix, f) cfg.Azure.RegisterFlagsWithPrefix(prefix, f) cfg.Swift.RegisterFlagsWithPrefix(prefix, f) - cfg.Filesystem.RegisterFlagsWithPrefix(prefix, f) + cfg.Filesystem.RegisterFlagsWithPrefixAndDefaultDirectory(prefix, dir, f) - f.StringVar(&cfg.Backend, prefix+"backend", S3, fmt.Sprintf("Backend storage to use. Supported backends are: %s.", strings.Join(cfg.supportedBackends(), ", "))) + f.StringVar(&cfg.Backend, prefix+"backend", Filesystem, fmt.Sprintf("Backend storage to use. Supported backends are: %s.", strings.Join(cfg.supportedBackends(), ", "))) } -func (cfg *Config) Validate() error { +func (cfg *StorageBackendConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { + cfg.RegisterFlagsWithPrefixAndDefaultDirectory(prefix, "", f) +} + +func (cfg *StorageBackendConfig) Validate() error { if !util.StringsContain(cfg.supportedBackends(), cfg.Backend) { return ErrUnsupportedStorageBackend } @@ -96,8 +106,49 @@ func (cfg *Config) Validate() error { return nil } +// Config holds configuration for accessing long-term storage. +type Config struct { + StorageBackendConfig `yaml:",inline"` + + StoragePrefix string `yaml:"storage_prefix"` + + // Not used internally, meant to allow callers to wrap Buckets + // created using this config + Middlewares []func(objstore.InstrumentedBucket) (objstore.InstrumentedBucket, error) `yaml:"-"` +} + +// RegisterFlags registers the backend storage config. +func (cfg *Config) RegisterFlags(f *flag.FlagSet) { + cfg.RegisterFlagsWithPrefix("", f) +} + +func (cfg *Config) RegisterFlagsWithPrefixAndDefaultDirectory(prefix, dir string, f *flag.FlagSet) { + cfg.StorageBackendConfig.RegisterFlagsWithPrefixAndDefaultDirectory(prefix, dir, f) + f.StringVar(&cfg.StoragePrefix, prefix+"storage-prefix", "", "Prefix for all objects stored in the backend storage. 
For simplicity, it may only contain digits and English alphabet letters.") +} + +func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { + cfg.RegisterFlagsWithPrefixAndDefaultDirectory(prefix, "", f) +} + +func (cfg *Config) Validate() error { + if cfg.StoragePrefix != "" { + acceptablePrefixCharacters := regexp.MustCompile(validPrefixCharactersRegex) + if !acceptablePrefixCharacters.MatchString(cfg.StoragePrefix) { + return ErrInvalidCharactersInStoragePrefix + } + } + + return cfg.StorageBackendConfig.Validate() +} + // NewClient creates a new bucket client based on the configured backend -func NewClient(ctx context.Context, cfg Config, name string, logger log.Logger, reg prometheus.Registerer) (client objstore.Bucket, err error) { +func NewClient(ctx context.Context, cfg Config, name string, logger log.Logger, reg prometheus.Registerer) (objstore.InstrumentedBucket, error) { + var ( + client objstore.Bucket + err error + ) + switch cfg.Backend { case S3: client, err = s3.NewBucketClient(cfg.S3, name, logger) @@ -117,17 +168,21 @@ func NewClient(ctx context.Context, cfg Config, name string, logger log.Logger, return nil, err } - client = opentracing.WrapWithTraces(bucketWithMetrics(client, name, reg)) + if cfg.StoragePrefix != "" { + client = NewPrefixedBucketClient(client, cfg.StoragePrefix) + } + + instrumentedClient := objstoretracing.WrapWithTraces(bucketWithMetrics(client, name, reg)) // Wrap the client with any provided middleware for _, wrap := range cfg.Middlewares { - client, err = wrap(client) + instrumentedClient, err = wrap(instrumentedClient) if err != nil { return nil, err } } - return client, nil + return instrumentedClient, nil } func bucketWithMetrics(bucketClient objstore.Bucket, name string, reg prometheus.Registerer) objstore.Bucket { diff --git a/pkg/storage/bucket/filesystem/config.go b/pkg/storage/bucket/filesystem/config.go index 923923a032906..873a2eb1ba289 100644 --- a/pkg/storage/bucket/filesystem/config.go +++ b/pkg/storage/bucket/filesystem/config.go @@ -12,7 +12,13 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { cfg.RegisterFlagsWithPrefix("", f) } +// RegisterFlagsWithPrefixAndDefaultDirectory registers the flags for filesystem +// storage with the provided prefix and sets the default directory to dir. +func (cfg *Config) RegisterFlagsWithPrefixAndDefaultDirectory(prefix, dir string, f *flag.FlagSet) { + f.StringVar(&cfg.Directory, prefix+"filesystem.dir", dir, "Local filesystem storage directory.") +} + // RegisterFlagsWithPrefix registers the flags for filesystem storage with the provided prefix func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { - f.StringVar(&cfg.Directory, prefix+"filesystem.dir", "", "Local filesystem storage directory.") + cfg.RegisterFlagsWithPrefixAndDefaultDirectory(prefix, "", f) } diff --git a/pkg/validation/limits.go b/pkg/validation/limits.go index 823b23dd93115..54077bb4f7c64 100644 --- a/pkg/validation/limits.go +++ b/pkg/validation/limits.go @@ -167,6 +167,12 @@ type Limits struct { PerTenantOverrideConfig string `yaml:"per_tenant_override_config" json:"per_tenant_override_config"` PerTenantOverridePeriod model.Duration `yaml:"per_tenant_override_period" json:"per_tenant_override_period"` + // This config doesn't have a CLI flag registered here because they're registered in + // their own original config struct. + S3SSEType string `yaml:"s3_sse_type" json:"s3_sse_type" doc:"nocli|description=S3 server-side encryption type. 
Required to enable server-side encryption overrides for a specific tenant. If not set, the default S3 client settings are used."` + S3SSEKMSKeyID string `yaml:"s3_sse_kms_key_id" json:"s3_sse_kms_key_id" doc:"nocli|description=S3 server-side encryption KMS Key ID. Ignored if the SSE type override is not set."` + S3SSEKMSEncryptionContext string `yaml:"s3_sse_kms_encryption_context" json:"s3_sse_kms_encryption_context" doc:"nocli|description=S3 server-side encryption KMS encryption context. If unset and the key ID override is set, the encryption context will not be provided to S3. Ignored if the SSE type override is not set."` + // Deprecated CompactorDeletionEnabled bool `yaml:"allow_deletes" json:"allow_deletes" doc:"deprecated|description=Use deletion_mode per tenant configuration instead."` @@ -610,6 +616,21 @@ func (o *Overrides) RulerRemoteWriteDisabled(userID string) bool { return o.getOverridesForUser(userID).RulerRemoteWriteDisabled } +// S3SSEType returns the per-tenant S3 SSE type. +func (o *Overrides) S3SSEType(user string) string { + return o.getOverridesForUser(user).S3SSEType +} + +// S3SSEKMSKeyID returns the per-tenant S3 KMS-SSE key id. +func (o *Overrides) S3SSEKMSKeyID(user string) string { + return o.getOverridesForUser(user).S3SSEKMSKeyID +} + +// S3SSEKMSEncryptionContext returns the per-tenant S3 KMS-SSE encryption context. +func (o *Overrides) S3SSEKMSEncryptionContext(user string) string { + return o.getOverridesForUser(user).S3SSEKMSEncryptionContext +} + // Deprecated: use RulerRemoteWriteConfig instead // RulerRemoteWriteURL returns the remote-write URL to use for a given user. func (o *Overrides) RulerRemoteWriteURL(userID string) string { From 184ea749d81486cb24103f353bb8877269bb6a72 Mon Sep 17 00:00:00 2001 From: Joao Marcal Date: Thu, 16 Nov 2023 10:35:55 +0000 Subject: [PATCH 06/38] Add thanos/objstore to common configuration --- pkg/loki/common/common.go | 7 +++++++ pkg/loki/config_wrapper.go | 15 +++++++++++++++ .../chunk/client/gcp/gcs_thanos_object_client.go | 9 +++++---- pkg/storage/factory.go | 5 +++-- 4 files changed, 30 insertions(+), 6 deletions(-) diff --git a/pkg/loki/common/common.go b/pkg/loki/common/common.go index 6f7fd1c768edf..e9d5a017c03a2 100644 --- a/pkg/loki/common/common.go +++ b/pkg/loki/common/common.go @@ -6,6 +6,7 @@ import ( "github.com/grafana/dskit/flagext" "github.com/grafana/dskit/netutil" + "github.com/grafana/loki/pkg/storage/bucket" "github.com/grafana/loki/pkg/storage/chunk/client/alibaba" "github.com/grafana/loki/pkg/storage/chunk/client/aws" "github.com/grafana/loki/pkg/storage/chunk/client/azure" @@ -58,6 +59,8 @@ func (c *Config) RegisterFlags(f *flag.FlagSet) { c.Ring.RegisterFlagsWithPrefix("common.storage.", "collectors/", f) c.Ring.RegisterFlagsWithPrefix("common.storage.", "collectors/", throwaway) + f.BoolVar(&c.Storage.ThanosObjStore, "common.thanos.enable", false, "Enable the thanos.io/objstore to be the backend for object storage") + // instance related flags. 
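 // Interface names default to the host's private network interfaces, falling back to eth0/en0.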
c.InstanceInterfaceNames = netutil.PrivateNetworkInterfacesWithFallback([]string{"eth0", "en0"}, util_log.Logger) throwaway.StringVar(&c.InstanceAddr, "common.instance-addr", "", "Default advertised address to be used by Loki components.") @@ -78,6 +81,8 @@ type Storage struct { Hedging hedging.Config `yaml:"hedging"` COS ibmcloud.COSConfig `yaml:"cos"` CongestionControl congestion.Config `yaml:"congestion_control,omitempty"` + ThanosObjStore bool `yaml:"thanos_objstore"` + ObjStoreConf bucket.Config `yaml:"objstore_config"` } func (s *Storage) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { @@ -91,6 +96,8 @@ func (s *Storage) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { s.Hedging.RegisterFlagsWithPrefix(prefix, f) s.COS.RegisterFlagsWithPrefix(prefix, f) s.CongestionControl.RegisterFlagsWithPrefix(prefix, f) + + s.ObjStoreConf.RegisterFlagsWithPrefix(prefix+"thanos.", f) } type FilesystemConfig struct { diff --git a/pkg/loki/config_wrapper.go b/pkg/loki/config_wrapper.go index 41a87775a9ecc..f236299d3b056 100644 --- a/pkg/loki/config_wrapper.go +++ b/pkg/loki/config_wrapper.go @@ -17,6 +17,7 @@ import ( "github.com/grafana/loki/pkg/util/cfg" lokiring "github.com/grafana/loki/pkg/util/ring" + "github.com/grafana/loki/pkg/ruler/rulestore" "github.com/grafana/loki/pkg/ruler/rulestore/local" loki_net "github.com/grafana/loki/pkg/util/net" ) @@ -564,6 +565,20 @@ func applyStorageConfig(cfg, defaults *ConfigWrapper) error { } } + if !reflect.DeepEqual(cfg.Common.Storage.ObjStoreConf, defaults.StorageConfig.ObjStoreConf) { + configsFound++ + + applyConfig = func(r *ConfigWrapper) { + r.Ruler.StoreConfig.ThanosObjStore = r.Common.Storage.ThanosObjStore + r.Ruler.StoreConfig.ObjStoreConf = rulestore.Config{ + Config: r.Common.Storage.ObjStoreConf, + } + r.StorageConfig.ThanosObjStore = r.Common.Storage.ThanosObjStore + r.StorageConfig.ObjStoreConf = r.Common.Storage.ObjStoreConf + r.StorageConfig.Hedging = r.Common.Storage.Hedging + } + } + if configsFound > 1 { return ErrTooManyStorageConfigs } diff --git a/pkg/storage/chunk/client/gcp/gcs_thanos_object_client.go b/pkg/storage/chunk/client/gcp/gcs_thanos_object_client.go index a2fe73050ce07..da72b865dd6fc 100644 --- a/pkg/storage/chunk/client/gcp/gcs_thanos_object_client.go +++ b/pkg/storage/chunk/client/gcp/gcs_thanos_object_client.go @@ -23,12 +23,13 @@ type GCSThanosObjectClient struct { client objstore.Bucket } -func NewGCSThanosObjectClient(ctx context.Context, cfg bucket.Config, component string, logger log.Logger, hedgingCfg hedging.Config) (*GCSThanosObjectClient, error) { - return newGCSThanosObjectClient(ctx, cfg, component, logger) +func NewGCSThanosObjectClient(ctx context.Context, cfg bucket.Config, component string, logger log.Logger, hedgingCfg hedging.Config, reg prometheus.Registerer) (*GCSThanosObjectClient, error) { + //TODO(JoaoBraveCoding) Add Hedging client once we are able to configure HTTP on GCS provider + return newGCSThanosObjectClient(ctx, cfg, component, logger, reg) } -func newGCSThanosObjectClient(ctx context.Context, cfg bucket.Config, component string, logger log.Logger) (*GCSThanosObjectClient, error) { - bucket, err := bucket.NewClient(ctx, cfg, component, logger, prometheus.DefaultRegisterer) +func newGCSThanosObjectClient(ctx context.Context, cfg bucket.Config, component string, logger log.Logger, reg prometheus.Registerer) (*GCSThanosObjectClient, error) { + bucket, err := bucket.NewClient(ctx, cfg, component, logger, reg) if err != nil { return nil, err } diff --git 
a/pkg/storage/factory.go b/pkg/storage/factory.go index 7f736eff7d60f..538ab9bc2c259 100644 --- a/pkg/storage/factory.go +++ b/pkg/storage/factory.go @@ -365,7 +365,8 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { cfg.Hedging.RegisterFlagsWithPrefix("store.", f) cfg.CongestionControl.RegisterFlagsWithPrefix("store.", f) - cfg.ObjStoreConf.RegisterFlags(f) + f.BoolVar(&cfg.ThanosObjStore, "thanos.enable", false, "Enable the thanos.io/objstore to be the backend for object storage") + cfg.ObjStoreConf.RegisterFlagsWithPrefix("thanos.", f) cfg.IndexQueriesCacheConfig.RegisterFlagsWithPrefix("store.index-cache-read.", "", f) f.DurationVar(&cfg.IndexCacheValidity, "store.index-cache-validity", 5*time.Minute, "Cache validity for active index entries. Should be no higher than -ingester.max-chunk-idle.") @@ -719,7 +720,7 @@ func internalNewObjectClient(name string, cfg Config, clientMetrics ClientMetric // Passing "gcs" as the component name as currently it's not // possible to get the component called this method // TODO(JoaoBraveCoding) update compoent when bigger refactor happens - return gcp.NewGCSThanosObjectClient(context.Background(), cfg.ObjStoreConf, "gcs", utilLog.Logger, cfg.Hedging) + return gcp.NewGCSThanosObjectClient(context.Background(), cfg.ObjStoreConf, "gcs", utilLog.Logger, cfg.Hedging, prometheus.WrapRegistererWithPrefix("loki_", prometheus.NewRegistry())) } return gcp.NewGCSObjectClient(context.Background(), gcsCfg, cfg.Hedging) From e816a3c6cc8687631fac3c36f9d850b6dce5f425 Mon Sep 17 00:00:00 2001 From: Joao Marcal Date: Tue, 21 Nov 2023 10:22:33 +0000 Subject: [PATCH 07/38] Fix metric registration by solving the "component" label conflict --- pkg/bloomcompactor/bloomcompactor.go | 2 +- pkg/logcli/query/query.go | 3 +- pkg/loki/modules.go | 12 +++---- pkg/storage/bucket/client.go | 5 ++- pkg/storage/factory.go | 34 ++++++++++--------- pkg/storage/factory_test.go | 9 ++--- pkg/storage/store.go | 20 ++++------- .../stores/shipper/bloomshipper/client.go | 3 +- .../boltdb/compactor/util_test.go | 5 +-- tools/tsdb/bloom-tester/lib.go | 2 +- tools/tsdb/bloom-tester/readlib.go | 2 +- tools/tsdb/index-analyzer/main.go | 2 +- tools/tsdb/migrate-versions/main.go | 2 +- tools/tsdb/migrate-versions/main_test.go | 3 +- 14 files changed, 52 insertions(+), 52 deletions(-) diff --git a/pkg/bloomcompactor/bloomcompactor.go b/pkg/bloomcompactor/bloomcompactor.go index b517957833a94..6481f31c8e9ca 100644 --- a/pkg/bloomcompactor/bloomcompactor.go +++ b/pkg/bloomcompactor/bloomcompactor.go @@ -131,7 +131,7 @@ func New( } // Configure ObjectClient and IndexShipper for series and chunk management - objectClient, err := storage.NewObjectClient(periodicConfig.ObjectType, storageCfg, clientMetrics) + objectClient, err := storage.NewObjectClient("bloom-compactor", periodicConfig.ObjectType, storageCfg, clientMetrics, r) if err != nil { return nil, fmt.Errorf("error creating object client '%s': %w", periodicConfig.ObjectType, err) } diff --git a/pkg/logcli/query/query.go b/pkg/logcli/query/query.go index 6a71f0979abcf..e7c0a02769091 100644 --- a/pkg/logcli/query/query.go +++ b/pkg/logcli/query/query.go @@ -497,10 +497,11 @@ func (q *Query) DoLocalQuery(out output.LogOutput, statistics bool, orgID string func GetObjectClient(store string, conf loki.Config, cm storage.ClientMetrics) (chunk.ObjectClient, error) { oc, err := storage.NewObjectClient( + "log-cli-query", store, conf.StorageConfig, cm, - ) + prometheus.DefaultRegisterer) if err != nil { return nil, err } diff --git 
a/pkg/loki/modules.go b/pkg/loki/modules.go index 9bc969dc77452..8424c6add2f6f 100644 --- a/pkg/loki/modules.go +++ b/pkg/loki/modules.go @@ -607,9 +607,7 @@ func (t *Loki) initTableManager() (services.Service, error) { os.Exit(1) } - reg := prometheus.WrapRegistererWith(prometheus.Labels{"component": "table-manager-store"}, prometheus.DefaultRegisterer) - - tableClient, err := storage.NewTableClient(lastConfig.IndexType, *lastConfig, t.Cfg.StorageConfig, t.clientMetrics, reg, util_log.Logger) + tableClient, err := storage.NewTableClient("table-manager-store", lastConfig.IndexType, *lastConfig, t.Cfg.StorageConfig, t.clientMetrics, prometheus.DefaultRegisterer, util_log.Logger) if err != nil { return nil, err } @@ -1215,7 +1213,7 @@ func (t *Loki) initCompactor() (services.Service, error) { continue } - objectClient, err := storage.NewObjectClient(periodConfig.ObjectType, t.Cfg.StorageConfig, t.clientMetrics) + objectClient, err := storage.NewObjectClient("compactor", periodConfig.ObjectType, t.Cfg.StorageConfig, t.clientMetrics, prometheus.DefaultRegisterer) if err != nil { return nil, fmt.Errorf("failed to create object client: %w", err) } @@ -1226,7 +1224,7 @@ func (t *Loki) initCompactor() (services.Service, error) { var deleteRequestStoreClient client.ObjectClient if t.Cfg.CompactorConfig.RetentionEnabled { if deleteStore := t.Cfg.CompactorConfig.DeleteRequestStore; deleteStore != "" { - if deleteRequestStoreClient, err = storage.NewObjectClient(deleteStore, t.Cfg.StorageConfig, t.clientMetrics); err != nil { + if deleteRequestStoreClient, err = storage.NewObjectClient("compactor", deleteStore, t.Cfg.StorageConfig, t.clientMetrics, prometheus.DefaultRegisterer); err != nil { return nil, fmt.Errorf("failed to create delete request store object client: %w", err) } } else { @@ -1319,7 +1317,7 @@ func (t *Loki) initIndexGateway() (services.Service, error) { } tableRange := period.GetIndexTableNumberRange(periodEndTime) - indexClient, err := storage.NewIndexClient(period, tableRange, t.Cfg.StorageConfig, t.Cfg.SchemaConfig, t.Overrides, t.clientMetrics, shardingStrategy, + indexClient, err := storage.NewIndexClient("index-gateway", period, tableRange, t.Cfg.StorageConfig, t.Cfg.SchemaConfig, t.Overrides, t.clientMetrics, shardingStrategy, prometheus.DefaultRegisterer, log.With(util_log.Logger, "index-store", fmt.Sprintf("%s-%s", period.IndexType, period.From.String())), t.Cfg.MetricsNamespace, ) if err != nil { @@ -1519,7 +1517,7 @@ func (t *Loki) initAnalytics() (services.Service, error) { return nil, err } - objectClient, err := storage.NewObjectClient(period.ObjectType, t.Cfg.StorageConfig, t.clientMetrics) + objectClient, err := storage.NewObjectClient("analytics", period.ObjectType, t.Cfg.StorageConfig, t.clientMetrics, prometheus.DefaultRegisterer) if err != nil { level.Info(util_log.Logger).Log("msg", "failed to initialize usage report", "err", err) return nil, nil diff --git a/pkg/storage/bucket/client.go b/pkg/storage/bucket/client.go index 5f19b39548791..6490baf764979 100644 --- a/pkg/storage/bucket/client.go +++ b/pkg/storage/bucket/client.go @@ -190,8 +190,11 @@ func bucketWithMetrics(bucketClient objstore.Bucket, name string, reg prometheus return bucketClient } + reg = prometheus.WrapRegistererWithPrefix("loki_", reg) + reg = prometheus.WrapRegistererWith(prometheus.Labels{"component": name}, reg) + return objstore.WrapWithMetrics( bucketClient, - prometheus.WrapRegistererWith(prometheus.Labels{"component": name}, reg), + reg, "") } diff --git a/pkg/storage/factory.go 
b/pkg/storage/factory.go index 538ab9bc2c259..6237b07cd9ea4 100644 --- a/pkg/storage/factory.go +++ b/pkg/storage/factory.go @@ -413,7 +413,7 @@ func (cfg *Config) Validate() error { } // NewIndexClient creates a new index client of the desired type specified in the PeriodConfig -func NewIndexClient(periodCfg config.PeriodConfig, tableRange config.TableRange, cfg Config, schemaCfg config.SchemaConfig, limits StoreLimits, cm ClientMetrics, shardingStrategy indexgateway.ShardingStrategy, registerer prometheus.Registerer, logger log.Logger, metricsNamespace string) (index.Client, error) { +func NewIndexClient(component string, periodCfg config.PeriodConfig, tableRange config.TableRange, cfg Config, schemaCfg config.SchemaConfig, limits StoreLimits, cm ClientMetrics, shardingStrategy indexgateway.ShardingStrategy, registerer prometheus.Registerer, logger log.Logger, metricsNamespace string) (index.Client, error) { switch true { case util.StringsContain(testingStorageTypes, periodCfg.IndexType): @@ -444,7 +444,7 @@ func NewIndexClient(periodCfg config.PeriodConfig, tableRange config.TableRange, return client, nil } - objectClient, err := NewObjectClient(periodCfg.ObjectType, cfg, cm) + objectClient, err := NewObjectClient(component, periodCfg.ObjectType, cfg, cm, registerer) if err != nil { return nil, err } @@ -505,7 +505,7 @@ func NewIndexClient(periodCfg config.PeriodConfig, tableRange config.TableRange, } // NewChunkClient makes a new chunk.Client of the desired types. -func NewChunkClient(name string, cfg Config, schemaCfg config.SchemaConfig, cc congestion.Controller, registerer prometheus.Registerer, clientMetrics ClientMetrics, logger log.Logger) (client.Client, error) { +func NewChunkClient(component, name string, cfg Config, schemaCfg config.SchemaConfig, cc congestion.Controller, registerer prometheus.Registerer, clientMetrics ClientMetrics, logger log.Logger) (client.Client, error) { var storeType = name // lookup storeType for named stores @@ -518,7 +518,7 @@ func NewChunkClient(name string, cfg Config, schemaCfg config.SchemaConfig, cc c case util.StringsContain(testingStorageTypes, storeType): switch storeType { case config.StorageTypeInMemory: - c, err := NewObjectClient(name, cfg, clientMetrics) + c, err := NewObjectClient(component, name, cfg, clientMetrics, registerer) if err != nil { return nil, err } @@ -528,21 +528,21 @@ func NewChunkClient(name string, cfg Config, schemaCfg config.SchemaConfig, cc c case util.StringsContain(supportedStorageTypes, storeType): switch storeType { case config.StorageTypeFileSystem: - c, err := NewObjectClient(name, cfg, clientMetrics) + c, err := NewObjectClient(component, name, cfg, clientMetrics, registerer) if err != nil { return nil, err } return client.NewClientWithMaxParallel(c, client.FSEncoder, cfg.MaxParallelGetChunk, schemaCfg), nil case config.StorageTypeAWS, config.StorageTypeS3, config.StorageTypeAzure, config.StorageTypeBOS, config.StorageTypeSwift, config.StorageTypeCOS, config.StorageTypeAlibabaCloud: - c, err := NewObjectClient(name, cfg, clientMetrics) + c, err := NewObjectClient(component, name, cfg, clientMetrics, registerer) if err != nil { return nil, err } return client.NewClientWithMaxParallel(c, nil, cfg.MaxParallelGetChunk, schemaCfg), nil case config.StorageTypeGCS: - c, err := NewObjectClient(name, cfg, clientMetrics) + c, err := NewObjectClient(component, name, cfg, clientMetrics, registerer) if err != nil { return nil, err } @@ -583,7 +583,7 @@ func NewChunkClient(name string, cfg Config, schemaCfg 
config.SchemaConfig, cc c } // NewTableClient makes a new table client based on the configuration. -func NewTableClient(name string, periodCfg config.PeriodConfig, cfg Config, cm ClientMetrics, registerer prometheus.Registerer, logger log.Logger) (index.TableClient, error) { +func NewTableClient(component, name string, periodCfg config.PeriodConfig, cfg Config, cm ClientMetrics, registerer prometheus.Registerer, logger log.Logger) (index.TableClient, error) { switch true { case util.StringsContain(testingStorageTypes, name): switch name { @@ -592,7 +592,7 @@ func NewTableClient(name string, periodCfg config.PeriodConfig, cfg Config, cm C } case util.StringsContain(supportedIndexTypes, name): - objectClient, err := NewObjectClient(periodCfg.ObjectType, cfg, cm) + objectClient, err := NewObjectClient(component, periodCfg.ObjectType, cfg, cm, registerer) if err != nil { return nil, err } @@ -647,8 +647,8 @@ func (c *ClientMetrics) Unregister() { } // NewObjectClient makes a new StorageClient with the prefix in the front. -func NewObjectClient(name string, cfg Config, clientMetrics ClientMetrics) (client.ObjectClient, error) { - actual, err := internalNewObjectClient(name, cfg, clientMetrics) +func NewObjectClient(component, name string, cfg Config, clientMetrics ClientMetrics, reg prometheus.Registerer) (client.ObjectClient, error) { + actual, err := internalNewObjectClient(component, name, cfg, clientMetrics, reg) if err != nil { return nil, err } @@ -662,12 +662,17 @@ func NewObjectClient(name string, cfg Config, clientMetrics ClientMetrics) (clie } // internalNewObjectClient makes the underlying StorageClient of the desired types. -func internalNewObjectClient(name string, cfg Config, clientMetrics ClientMetrics) (client.ObjectClient, error) { +func internalNewObjectClient(component, name string, cfg Config, clientMetrics ClientMetrics, reg prometheus.Registerer) (client.ObjectClient, error) { var ( namedStore string storeType = name ) + // preserve olf reg behaviour + if !cfg.ThanosObjStore { + reg = prometheus.WrapRegistererWith(prometheus.Labels{"component": component}, reg) + } + // lookup storeType for named stores if nsType, ok := cfg.NamedStores.storeType[name]; ok { storeType = nsType @@ -717,10 +722,7 @@ func internalNewObjectClient(name string, cfg Config, clientMetrics ClientMetric gcsCfg.EnableRetries = false } if cfg.ThanosObjStore { - // Passing "gcs" as the component name as currently it's not - // possible to get the component called this method - // TODO(JoaoBraveCoding) update compoent when bigger refactor happens - return gcp.NewGCSThanosObjectClient(context.Background(), cfg.ObjStoreConf, "gcs", utilLog.Logger, cfg.Hedging, prometheus.WrapRegistererWithPrefix("loki_", prometheus.NewRegistry())) + return gcp.NewGCSThanosObjectClient(context.Background(), cfg.ObjStoreConf, component, utilLog.Logger, cfg.Hedging, reg) } return gcp.NewGCSObjectClient(context.Background(), gcsCfg, cfg.Hedging) diff --git a/pkg/storage/factory_test.go b/pkg/storage/factory_test.go index 2588c9dc69dd1..242f99abc9bf3 100644 --- a/pkg/storage/factory_test.go +++ b/pkg/storage/factory_test.go @@ -8,6 +8,7 @@ import ( "github.com/go-kit/log" "github.com/grafana/dskit/flagext" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -233,7 +234,7 @@ func TestNewObjectClient_prefixing(t *testing.T) { var cfg Config flagext.DefaultValues(&cfg) - objectClient, err := NewObjectClient("inmemory", 
cfg, cm) + objectClient, err := NewObjectClient("inmemory", "inmemory", cfg, cm, prometheus.NewRegistry()) require.NoError(t, err) _, ok := objectClient.(client.PrefixedObjectClient) @@ -245,7 +246,7 @@ func TestNewObjectClient_prefixing(t *testing.T) { flagext.DefaultValues(&cfg) cfg.ObjectPrefix = "my/prefix/" - objectClient, err := NewObjectClient("inmemory", cfg, cm) + objectClient, err := NewObjectClient("inmemory", "inmemory", cfg, cm, prometheus.NewRegistry()) require.NoError(t, err) prefixed, ok := objectClient.(client.PrefixedObjectClient) @@ -258,7 +259,7 @@ func TestNewObjectClient_prefixing(t *testing.T) { flagext.DefaultValues(&cfg) cfg.ObjectPrefix = "my/prefix" - objectClient, err := NewObjectClient("inmemory", cfg, cm) + objectClient, err := NewObjectClient("inmemory", "inmemory", cfg, cm, prometheus.NewRegistry()) require.NoError(t, err) prefixed, ok := objectClient.(client.PrefixedObjectClient) @@ -271,7 +272,7 @@ func TestNewObjectClient_prefixing(t *testing.T) { flagext.DefaultValues(&cfg) cfg.ObjectPrefix = "/my/prefix/" - objectClient, err := NewObjectClient("inmemory", cfg, cm) + objectClient, err := NewObjectClient("inmemory", "inmemory", cfg, cm, prometheus.NewRegistry()) require.NoError(t, err) prefixed, ok := objectClient.(client.PrefixedObjectClient) diff --git a/pkg/storage/store.go b/pkg/storage/store.go index 6781dbbff8a3b..a71af3dc3d789 100644 --- a/pkg/storage/store.go +++ b/pkg/storage/store.go @@ -216,9 +216,6 @@ func (s *LokiStore) chunkClientForPeriod(p config.PeriodConfig) (client.Client, if objectStoreType == "" { objectStoreType = p.IndexType } - chunkClientReg := prometheus.WrapRegistererWith( - prometheus.Labels{"component": "chunk-store-" + p.From.String()}, s.registerer) - var cc congestion.Controller ccCfg := s.cfg.CongestionControl @@ -230,7 +227,8 @@ func (s *LokiStore) chunkClientForPeriod(p config.PeriodConfig) (client.Client, ) } - chunks, err := NewChunkClient(objectStoreType, s.cfg, s.schemaCfg, cc, chunkClientReg, s.clientMetrics, s.logger) + component := "chunk-store-" + p.From.String() + chunks, err := NewChunkClient(component, objectStoreType, s.cfg, s.schemaCfg, cc, s.registerer, s.clientMetrics, s.logger) if err != nil { return nil, errors.Wrap(err, "error creating object client") } @@ -253,14 +251,8 @@ func shouldUseIndexGatewayClient(cfg indexshipper.Config) bool { } func (s *LokiStore) storeForPeriod(p config.PeriodConfig, tableRange config.TableRange, chunkClient client.Client, f *fetcher.Fetcher) (stores.ChunkWriter, index.ReaderWriter, func(), error) { - indexClientReg := prometheus.WrapRegistererWith( - prometheus.Labels{ - "component": fmt.Sprintf( - "index-store-%s-%s", - p.IndexType, - p.From.String(), - ), - }, s.registerer) + component := fmt.Sprintf("index-store-%s-%s", p.IndexType, p.From.String()) + indexClientReg := prometheus.WrapRegistererWith(prometheus.Labels{"component": component}, s.registerer) indexClientLogger := log.With(s.logger, "index-store", fmt.Sprintf("%s-%s", p.IndexType, p.From.String())) if p.IndexType == config.TSDBType { @@ -278,7 +270,7 @@ func (s *LokiStore) storeForPeriod(p config.PeriodConfig, tableRange config.Tabl }, nil } - objectClient, err := NewObjectClient(p.ObjectType, s.cfg, s.clientMetrics) + objectClient, err := NewObjectClient(component, p.ObjectType, s.cfg, s.clientMetrics, s.registerer) if err != nil { return nil, nil, nil, err } @@ -301,7 +293,7 @@ func (s *LokiStore) storeForPeriod(p config.PeriodConfig, tableRange config.Tabl }, nil } - idx, err := NewIndexClient(p, 
tableRange, s.cfg, s.schemaCfg, s.limits, s.clientMetrics, nil, indexClientReg, indexClientLogger, s.metricsNamespace) + idx, err := NewIndexClient(component, p, tableRange, s.cfg, s.schemaCfg, s.limits, s.clientMetrics, nil, s.registerer, indexClientLogger, s.metricsNamespace) if err != nil { return nil, nil, nil, errors.Wrap(err, "error creating index client") } diff --git a/pkg/storage/stores/shipper/bloomshipper/client.go b/pkg/storage/stores/shipper/bloomshipper/client.go index a68959e1d908e..cc4d282b48efb 100644 --- a/pkg/storage/stores/shipper/bloomshipper/client.go +++ b/pkg/storage/stores/shipper/bloomshipper/client.go @@ -11,6 +11,7 @@ import ( "strings" "time" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/grafana/dskit/concurrency" @@ -94,7 +95,7 @@ type Client interface { func NewBloomClient(periodicConfigs []config.PeriodConfig, storageConfig storage.Config, clientMetrics storage.ClientMetrics) (*BloomClient, error) { periodicObjectClients := make(map[config.DayTime]client.ObjectClient) for _, periodicConfig := range periodicConfigs { - objectClient, err := storage.NewObjectClient(periodicConfig.ObjectType, storageConfig, clientMetrics) + objectClient, err := storage.NewObjectClient("bloom-shipper", periodicConfig.ObjectType, storageConfig, clientMetrics, prometheus.DefaultRegisterer) if err != nil { return nil, fmt.Errorf("error creating object client '%s': %w", periodicConfig.ObjectType, err) } diff --git a/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/util_test.go b/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/util_test.go index 12dbfb8d9a7df..a716f5522041e 100644 --- a/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/util_test.go +++ b/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/util_test.go @@ -10,6 +10,7 @@ import ( ww "github.com/grafana/dskit/server" "github.com/grafana/dskit/user" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "github.com/stretchr/testify/require" @@ -128,11 +129,11 @@ type testObjectClient struct { } func newTestObjectClient(path string, clientMetrics storage.ClientMetrics) client.ObjectClient { - c, err := storage.NewObjectClient("filesystem", storage.Config{ + c, err := storage.NewObjectClient("compactor", "filesystem", storage.Config{ FSConfig: local.FSConfig{ Directory: path, }, - }, clientMetrics) + }, clientMetrics, prometheus.NewRegistry()) if err != nil { panic(err) } diff --git a/tools/tsdb/bloom-tester/lib.go b/tools/tsdb/bloom-tester/lib.go index 7eefb56342c40..d46f043c86124 100644 --- a/tools/tsdb/bloom-tester/lib.go +++ b/tools/tsdb/bloom-tester/lib.go @@ -47,7 +47,7 @@ func execute() { periodCfg, tableRange, tableName, err := helpers.GetPeriodConfigForTableNumber(bucket, conf.SchemaConfig.Configs) helpers.ExitErr("find period config for bucket", err) - objectClient, err := storage.NewObjectClient(periodCfg.ObjectType, conf.StorageConfig, clientMetrics) + objectClient, err := storage.NewObjectClient("bloom-tester", periodCfg.ObjectType, conf.StorageConfig, clientMetrics, prometheus.DefaultRegisterer) helpers.ExitErr("creating object client", err) chunkClient := client.NewClient(objectClient, nil, conf.SchemaConfig) diff --git a/tools/tsdb/bloom-tester/readlib.go b/tools/tsdb/bloom-tester/readlib.go index eaca7a38c15bd..3008043f8b9c6 100644 --- a/tools/tsdb/bloom-tester/readlib.go +++ b/tools/tsdb/bloom-tester/readlib.go @@ -66,7 +66,7 @@ func 
executeRead() { periodCfg, tableRange, tableName, err := helpers.GetPeriodConfigForTableNumber(bucket, conf.SchemaConfig.Configs) helpers.ExitErr("find period config for bucket", err) - objectClient, err := storage.NewObjectClient(periodCfg.ObjectType, conf.StorageConfig, clientMetrics) + objectClient, err := storage.NewObjectClient("bloom-tester", periodCfg.ObjectType, conf.StorageConfig, clientMetrics, prometheus.DefaultRegisterer) helpers.ExitErr("creating object client", err) chunkClient := client.NewClient(objectClient, nil, conf.SchemaConfig) diff --git a/tools/tsdb/index-analyzer/main.go b/tools/tsdb/index-analyzer/main.go index fd59bd4792fdf..87162630675b2 100644 --- a/tools/tsdb/index-analyzer/main.go +++ b/tools/tsdb/index-analyzer/main.go @@ -24,7 +24,7 @@ func main() { periodCfg, tableRange, tableName, err := helpers.GetPeriodConfigForTableNumber(bucket, conf.SchemaConfig.Configs) helpers.ExitErr("find period config for bucket", err) - objectClient, err := storage.NewObjectClient(periodCfg.ObjectType, conf.StorageConfig, clientMetrics) + objectClient, err := storage.NewObjectClient("index-analyzer", periodCfg.ObjectType, conf.StorageConfig, clientMetrics, prometheus.DefaultRegisterer) helpers.ExitErr("creating object client", err) shipper, err := indexshipper.NewIndexShipper( diff --git a/tools/tsdb/migrate-versions/main.go b/tools/tsdb/migrate-versions/main.go index b458c80d4c1b8..bb9c3b8f23bcd 100644 --- a/tools/tsdb/migrate-versions/main.go +++ b/tools/tsdb/migrate-versions/main.go @@ -97,7 +97,7 @@ func main() { } func migrateTables(pCfg config.PeriodConfig, storageCfg storage.Config, clientMetrics storage.ClientMetrics, tableRange config.TableRange) error { - objClient, err := storage.NewObjectClient(pCfg.ObjectType, storageCfg, clientMetrics) + objClient, err := storage.NewObjectClient("tables-migration-tool", pCfg.ObjectType, storageCfg, clientMetrics, prometheus.DefaultRegisterer) if err != nil { return err } diff --git a/tools/tsdb/migrate-versions/main_test.go b/tools/tsdb/migrate-versions/main_test.go index 2f4690fde0a7e..cfadbdd526618 100644 --- a/tools/tsdb/migrate-versions/main_test.go +++ b/tools/tsdb/migrate-versions/main_test.go @@ -10,6 +10,7 @@ import ( "testing" "time" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "github.com/stretchr/testify/require" @@ -52,7 +53,7 @@ func TestMigrateTables(t *testing.T) { } clientMetrics := storage.NewClientMetrics() - objClient, err := storage.NewObjectClient(pcfg.ObjectType, storageCfg, clientMetrics) + objClient, err := storage.NewObjectClient("tables-migration-tool", pcfg.ObjectType, storageCfg, clientMetrics, prometheus.DefaultRegisterer) require.NoError(t, err) indexStorageClient := shipperstorage.NewIndexStorageClient(objClient, pcfg.IndexTables.PathPrefix) From 32a9e4c6945e2bc1f28c9deaaa529114bd77fb7f Mon Sep 17 00:00:00 2001 From: Joao Marcal Date: Mon, 18 Mar 2024 15:28:56 +0000 Subject: [PATCH 08/38] feat(storage): implements hedging for thanos gcs client --- docs/sources/configure/_index.md | 154 +- go.mod | 65 +- go.sum | 136 +- pkg/storage/bucket/gcs/config.go | 14 + pkg/storage/bucket/http/config.go | 8 +- .../client/gcp/gcs_thanos_object_client.go | 30 +- .../gcp/gcs_thanos_object_client_test.go | 3 +- .../.release-please-manifest-individual.json | 18 +- .../.release-please-manifest-submodules.json | 253 +- .../go/.release-please-manifest.json | 2 +- vendor/cloud.google.com/go/CHANGES.md | 31 + 
vendor/cloud.google.com/go/CONTRIBUTING.md | 4 + vendor/cloud.google.com/go/README.md | 64 +- .../go/compute/internal/version.go | 2 +- vendor/cloud.google.com/go/iam/CHANGES.md | 28 + .../go/iam/apiv1/iampb/iam_policy.pb.go | 2 +- .../go/iam/apiv1/iampb/options.pb.go | 2 +- .../go/iam/apiv1/iampb/policy.pb.go | 20 +- .../go/internal/.repo-metadata-full.json | 142 +- .../go/internal/pubsub/message.go | 14 +- .../go/longrunning/CHANGES.md | 21 + .../go/longrunning/autogen/doc.go | 45 +- .../autogen/longrunningpb/operations.pb.go | 2 +- .../longrunning/autogen/operations_client.go | 57 +- .../go/release-please-config-individual.json | 8 +- ...elease-please-config-yoshi-submodules.json | 9 + vendor/cloud.google.com/go/storage/CHANGES.md | 94 + vendor/cloud.google.com/go/storage/acl.go | 2 +- vendor/cloud.google.com/go/storage/bucket.go | 97 +- vendor/cloud.google.com/go/storage/doc.go | 39 + .../go/storage/grpc_client.go | 513 +- vendor/cloud.google.com/go/storage/hmac.go | 41 +- .../go/storage/http_client.go | 194 +- .../go/storage/internal/apiv2/auxiliary.go | 210 + .../go/storage/internal/apiv2/doc.go | 124 +- .../internal/apiv2/gapic_metadata.json | 10 + .../go/storage/internal/apiv2/metadata.go | 26 - .../storage/internal/apiv2/storage_client.go | 908 +- .../apiv2/{stubs => storagepb}/storage.pb.go | 5115 +++--- .../go/storage/internal/version.go | 2 +- vendor/cloud.google.com/go/storage/invoke.go | 52 +- .../go/storage/notifications.go | 2 +- vendor/cloud.google.com/go/storage/option.go | 2 +- .../go/storage/post_policy_v4.go | 13 +- vendor/cloud.google.com/go/storage/reader.go | 28 +- vendor/cloud.google.com/go/storage/storage.go | 98 +- vendor/cloud.google.com/go/storage/writer.go | 5 +- .../azure-sdk-for-go/sdk/azcore/CHANGELOG.md | 134 + .../internal/resource/resource_identifier.go | 224 + .../arm/internal/resource/resource_type.go | 114 + .../sdk/azcore/arm/policy/policy.go | 98 + .../sdk/azcore/arm/runtime/pipeline.go | 65 + .../azcore/arm/runtime/policy_bearer_token.go | 145 + .../azcore/arm/runtime/policy_register_rp.go | 347 + .../arm/runtime/policy_trace_namespace.go | 30 + .../sdk/azcore/arm/runtime/runtime.go | 24 + .../Azure/azure-sdk-for-go/sdk/azcore/core.go | 60 +- .../Azure/azure-sdk-for-go/sdk/azcore/doc.go | 7 + .../sdk/azcore/internal/exported/exported.go | 108 + .../sdk/azcore/internal/exported/pipeline.go | 20 - .../sdk/azcore/internal/exported/request.go | 117 +- .../internal/exported/response_error.go | 51 +- .../sdk/azcore/internal/pollers/fake/fake.go | 133 + .../sdk/azcore/internal/shared/constants.go | 14 +- .../sdk/azcore/internal/shared/shared.go | 110 +- .../sdk/azcore/policy/policy.go | 27 +- .../sdk/azcore/runtime/pager.go | 59 +- .../sdk/azcore/runtime/pipeline.go | 36 +- .../sdk/azcore/runtime/policy_bearer_token.go | 39 +- .../sdk/azcore/runtime/policy_http_header.go | 3 +- .../sdk/azcore/runtime/policy_http_trace.go | 143 + .../azcore/runtime/policy_include_response.go | 5 +- .../azcore/runtime/policy_key_credential.go | 57 + .../sdk/azcore/runtime/policy_logging.go | 3 +- .../sdk/azcore/runtime/policy_retry.go | 13 +- .../azcore/runtime/policy_sas_credential.go | 47 + .../sdk/azcore/runtime/policy_telemetry.go | 4 + .../sdk/azcore/runtime/poller.go | 103 +- .../sdk/azcore/runtime/request.go | 117 +- .../sdk/azcore/runtime/response.go | 30 +- .../runtime/transport_default_dialer_other.go | 15 + .../runtime/transport_default_dialer_wasm.go | 15 + .../runtime/transport_default_http_client.go | 17 +- .../sdk/azcore/tracing/tracing.go | 61 +- 
.../sdk/azidentity/CHANGELOG.md | 87 +- .../sdk/azidentity/MIGRATION.md | 6 +- .../azure-sdk-for-go/sdk/azidentity/README.md | 40 +- .../sdk/azidentity/TOKEN_CACHING.MD | 70 + .../sdk/azidentity/TROUBLESHOOTING.md | 48 +- .../sdk/azidentity/assets.json | 2 +- .../sdk/azidentity/authentication_record.go | 95 + .../sdk/azidentity/azidentity.go | 137 +- .../sdk/azidentity/azure_cli_credential.go | 159 +- .../azure_developer_cli_credential.go | 169 + .../azure-sdk-for-go/sdk/azidentity/ci.yml | 26 +- .../azidentity/client_assertion_credential.go | 44 +- .../client_certificate_credential.go | 45 +- .../azidentity/client_secret_credential.go | 42 +- .../sdk/azidentity/confidential_client.go | 184 + .../azidentity/default_azure_credential.go | 45 +- .../azidentity/developer_credential_util.go | 38 + .../sdk/azidentity/device_code_credential.go | 96 +- .../sdk/azidentity/environment_credential.go | 4 +- .../azure-sdk-for-go/sdk/azidentity/errors.go | 27 +- .../interactive_browser_credential.go | 78 +- .../sdk/azidentity/internal/exported.go | 18 + .../sdk/azidentity/internal/internal.go | 31 + .../sdk/azidentity/managed_identity_client.go | 82 +- .../azidentity/managed_identity_credential.go | 40 +- .../sdk/azidentity/on_behalf_of_credential.go | 42 +- .../sdk/azidentity/public_client.go | 273 + .../azure-sdk-for-go/sdk/azidentity/syncer.go | 130 - .../sdk/azidentity/test-resources-pre.ps1 | 36 + .../sdk/azidentity/test-resources.bicep | 1 + .../username_password_credential.go | 63 +- .../sdk/azidentity/version.go | 5 +- .../sdk/azidentity/workload_identity.go | 15 +- .../sdk/internal/errorinfo/errorinfo.go | 30 + .../sdk/internal/exported/exported.go | 5 + .../sdk/storage/azblob/CHANGELOG.md | 182 + .../sdk/storage/azblob/README.md | 148 +- .../sdk/storage/azblob/appendblob/client.go | 150 +- .../sdk/storage/azblob/appendblob/models.go | 76 +- .../storage/azblob/appendblob/responses.go | 3 + .../sdk/storage/azblob/assets.json | 6 + .../sdk/storage/azblob/blob/client.go | 305 +- .../sdk/storage/azblob/blob/constants.go | 92 +- .../sdk/storage/azblob/blob/models.go | 172 +- .../sdk/storage/azblob/blob/responses.go | 27 +- .../sdk/storage/azblob/blob/retry_reader.go | 4 +- .../sdk/storage/azblob/blob/utils.go | 18 +- .../storage/azblob/bloberror/error_codes.go | 9 + .../storage/azblob/blockblob/chunkwriting.go | 331 +- .../sdk/storage/azblob/blockblob/client.go | 210 +- .../sdk/storage/azblob/blockblob/constants.go | 16 +- .../sdk/storage/azblob/blockblob/models.go | 288 +- .../sdk/storage/azblob/blockblob/responses.go | 8 +- .../sdk/storage/azblob/ci.yml | 6 + .../sdk/storage/azblob/client.go | 29 +- .../sdk/storage/azblob/common.go | 2 +- .../sdk/storage/azblob/constants.go | 4 +- .../storage/azblob/container/batch_builder.go | 94 + .../sdk/storage/azblob/container/client.go | 207 +- .../sdk/storage/azblob/container/constants.go | 72 +- .../sdk/storage/azblob/container/models.go | 212 +- .../sdk/storage/azblob/container/responses.go | 31 + .../sdk/storage/azblob/doc.go | 43 +- .../storage/azblob/internal/base/clients.go | 86 +- .../azblob/internal/exported/blob_batch.go | 280 + .../azblob/internal/exported/exported.go | 2 +- .../azblob/internal/exported/log_events.go | 20 + .../azblob/internal/exported/set_expiry.go | 71 + .../exported/shared_key_credential.go | 22 +- .../exported/transfer_validation_option.go | 67 + .../azblob/internal/exported/version.go | 4 +- .../internal/generated/appendblob_client.go | 19 +- .../azblob/internal/generated/autorest.md | 187 +- 
.../azblob/internal/generated/blob_client.go | 33 +- .../internal/generated/block_blob_client.go | 19 +- .../azblob/internal/generated/build.go | 10 + .../azblob/internal/generated/constants.go | 9 + .../internal/generated/container_client.go | 19 +- .../azblob/internal/generated/models.go | 141 + .../internal/generated/pageblob_client.go | 19 +- .../internal/generated/service_client.go | 19 +- .../generated/zz_appendblob_client.go | 393 +- .../internal/generated/zz_blob_client.go | 1781 +- .../internal/generated/zz_blockblob_client.go | 563 +- .../azblob/internal/generated/zz_constants.go | 129 +- .../internal/generated/zz_container_client.go | 889 +- .../azblob/internal/generated/zz_models.go | 1250 +- .../internal/generated/zz_models_serde.go | 239 +- .../azblob/internal/generated/zz_options.go | 1469 ++ .../internal/generated/zz_pageblob_client.go | 756 +- .../internal/generated/zz_response_types.go | 246 +- .../internal/generated/zz_service_client.go | 261 +- .../internal/generated/zz_time_rfc1123.go | 23 +- .../internal/generated/zz_time_rfc3339.go | 35 +- .../internal/generated/zz_xml_helper.go | 26 +- .../azblob/internal/shared/batch_transfer.go | 21 +- .../azblob/internal/shared/buffer_manager.go | 70 + .../internal/shared/challenge_policy.go | 113 + .../azblob/internal/shared/mmf_unix.go | 38 + .../azblob/internal/shared/mmf_windows.go | 56 + .../storage/azblob/internal/shared/shared.go | 87 +- .../internal/shared/transfer_manager.go | 156 - .../sdk/storage/azblob/log.go | 16 + .../sdk/storage/azblob/models.go | 9 +- .../sdk/storage/azblob/pageblob/client.go | 91 +- .../sdk/storage/azblob/pageblob/constants.go | 4 +- .../sdk/storage/azblob/pageblob/models.go | 96 +- .../sdk/storage/azblob/sas/account.go | 183 +- .../sdk/storage/azblob/sas/query_params.go | 145 +- .../sdk/storage/azblob/sas/service.go | 253 +- .../storage/azblob/service/batch_builder.go | 94 + .../sdk/storage/azblob/service/client.go | 148 +- .../sdk/storage/azblob/service/models.go | 183 +- .../sdk/storage/azblob/service/responses.go | 22 + .../sdk/storage/azblob/test-resources.json | 72 +- .../apps/confidential/confidential.go | 36 +- .../apps/internal/base/base.go | 10 + .../internal/base/internal/storage/items.go | 21 +- .../internal/storage/partitioned_storage.go | 20 +- .../internal/base/internal/storage/storage.go | 100 +- .../storage/test_serialized_cache.json | 56 - .../apps/internal/oauth/oauth.go | 1 + .../oauth/ops/accesstokens/accesstokens.go | 8 +- .../internal/oauth/ops/accesstokens/tokens.go | 30 +- .../internal/oauth/ops/authority/authority.go | 37 + .../apps/internal/shared/shared.go | 3 +- .../apps/internal/version/version.go | 2 +- .../apps/public/public.go | 85 +- .../github.com/golang-jwt/jwt/v5/.gitignore | 4 + vendor/github.com/golang-jwt/jwt/v5/LICENSE | 9 + .../golang-jwt/jwt/v5/MIGRATION_GUIDE.md | 195 + vendor/github.com/golang-jwt/jwt/v5/README.md | 167 + .../github.com/golang-jwt/jwt/v5/SECURITY.md | 19 + .../golang-jwt/jwt/v5/VERSION_HISTORY.md | 137 + vendor/github.com/golang-jwt/jwt/v5/claims.go | 16 + vendor/github.com/golang-jwt/jwt/v5/doc.go | 4 + vendor/github.com/golang-jwt/jwt/v5/ecdsa.go | 134 + .../golang-jwt/jwt/v5/ecdsa_utils.go | 69 + .../github.com/golang-jwt/jwt/v5/ed25519.go | 79 + .../golang-jwt/jwt/v5/ed25519_utils.go | 64 + vendor/github.com/golang-jwt/jwt/v5/errors.go | 49 + .../golang-jwt/jwt/v5/errors_go1_20.go | 47 + .../golang-jwt/jwt/v5/errors_go_other.go | 78 + vendor/github.com/golang-jwt/jwt/v5/hmac.go | 104 + .../golang-jwt/jwt/v5/map_claims.go | 109 + 
vendor/github.com/golang-jwt/jwt/v5/none.go | 50 + vendor/github.com/golang-jwt/jwt/v5/parser.go | 238 + .../golang-jwt/jwt/v5/parser_option.go | 128 + .../golang-jwt/jwt/v5/registered_claims.go | 63 + vendor/github.com/golang-jwt/jwt/v5/rsa.go | 93 + .../github.com/golang-jwt/jwt/v5/rsa_pss.go | 135 + .../github.com/golang-jwt/jwt/v5/rsa_utils.go | 107 + .../golang-jwt/jwt/v5/signing_method.go | 49 + .../golang-jwt/jwt/v5/staticcheck.conf | 1 + vendor/github.com/golang-jwt/jwt/v5/token.go | 100 + .../golang-jwt/jwt/v5/token_option.go | 5 + vendor/github.com/golang-jwt/jwt/v5/types.go | 149 + .../github.com/golang-jwt/jwt/v5/validator.go | 316 + .../github.com/google/go-cmp/cmp/compare.go | 38 +- .../cmp/{export_unsafe.go => export.go} | 5 - .../google/go-cmp/cmp/export_panic.go | 16 - .../value/{pointer_unsafe.go => pointer.go} | 3 - .../cmp/internal/value/pointer_purego.go | 34 - .../github.com/google/go-cmp/cmp/options.go | 84 +- vendor/github.com/google/go-cmp/cmp/path.go | 46 +- .../google/go-cmp/cmp/report_reflect.go | 2 +- vendor/github.com/google/s2a-go/README.md | 7 +- .../internal/handshaker/service/service.go | 53 +- .../s2a-go/internal/record/ticketsender.go | 8 +- .../google/s2a-go/internal/v2/s2av2.go | 105 +- .../github.com/google/s2a-go/retry/retry.go | 144 + vendor/github.com/google/s2a-go/s2a.go | 47 +- .../github.com/google/s2a-go/s2a_options.go | 7 + .../s2a-go/testdata/mds_client_cert.pem | 19 + .../google/s2a-go/testdata/mds_client_key.pem | 28 + .../google/s2a-go/testdata/mds_root_cert.pem | 21 + .../s2a-go/testdata/mds_server_cert.pem | 21 + .../google/s2a-go/testdata/mds_server_key.pem | 28 + .../s2a-go/testdata/self_signed_cert.pem | 19 + .../s2a-go/testdata/self_signed_key.pem | 28 + vendor/github.com/google/uuid/CHANGELOG.md | 31 + vendor/github.com/google/uuid/CONTRIBUTING.md | 2 +- vendor/github.com/google/uuid/hash.go | 6 + vendor/github.com/google/uuid/time.go | 21 +- vendor/github.com/google/uuid/uuid.go | 79 +- vendor/github.com/google/uuid/version6.go | 56 + vendor/github.com/google/uuid/version7.go | 104 + .../client/client.go | 42 +- .../client/util/util.go | 9 + .../prometheus/collectors/expvar_collector.go | 2 +- .../collectors/go_collector_latest.go | 9 +- .../client_golang/prometheus/counter.go | 26 +- .../client_golang/prometheus/desc.go | 28 +- .../prometheus/expvar_collector.go | 2 +- .../client_golang/prometheus/gauge.go | 8 +- .../client_golang/prometheus/histogram.go | 118 +- .../prometheus/internal/difflib.go | 2 +- .../client_golang/prometheus/labels.go | 58 +- .../client_golang/prometheus/metric.go | 3 + .../client_golang/prometheus/promauto/auto.go | 6 +- .../prometheus/promhttp/instrument_server.go | 9 +- .../client_golang/prometheus/push/push.go | 2 +- .../client_golang/prometheus/registry.go | 6 +- .../client_golang/prometheus/summary.go | 41 +- .../client_golang/prometheus/value.go | 55 +- .../client_golang/prometheus/vec.go | 106 +- .../prometheus/client_model/go/metrics.pb.go | 347 +- .../prometheus/procfs/Makefile.common | 8 +- vendor/github.com/prometheus/procfs/fs.go | 8 +- .../prometheus/procfs/fs_statfs_notype.go | 4 +- .../prometheus/procfs/fs_statfs_type.go | 4 +- vendor/github.com/prometheus/procfs/proc.go | 2 +- .../github.com/prometheus/procfs/proc_psi.go | 4 +- .../prometheus/procfs/proc_smaps.go | 4 +- vendor/github.com/prometheus/procfs/stat.go | 4 + vendor/github.com/prometheus/procfs/thread.go | 6 +- .../github.com/thanos-io/objstore/.go-version | 2 +- .../thanos-io/objstore/CHANGELOG.md | 14 + 
vendor/github.com/thanos-io/objstore/Makefile | 2 +- .../github.com/thanos-io/objstore/README.md | 41 +- .../thanos-io/objstore/exthttp/transport.go | 10 + .../github.com/thanos-io/objstore/objstore.go | 144 +- .../objstore/providers/azure/helpers.go | 15 +- .../thanos-io/objstore/providers/gcs/gcs.go | 89 +- .../thanos-io/objstore/providers/s3/s3.go | 28 +- .../objstore/providers/swift/swift.go | 72 +- .../tracing/opentracing/opentracing.go | 4 +- .../x/crypto/internal/poly1305/bits_compat.go | 39 - .../x/crypto/internal/poly1305/bits_go1.13.go | 21 - .../x/crypto/internal/poly1305/sum_generic.go | 43 +- vendor/golang.org/x/net/context/go17.go | 1 - vendor/golang.org/x/net/context/go19.go | 1 - vendor/golang.org/x/net/context/pre_go17.go | 1 - vendor/golang.org/x/net/context/pre_go19.go | 1 - vendor/golang.org/x/net/http2/databuffer.go | 59 +- vendor/golang.org/x/net/http2/frame.go | 11 +- vendor/golang.org/x/net/http2/go111.go | 30 - vendor/golang.org/x/net/http2/go115.go | 27 - vendor/golang.org/x/net/http2/go118.go | 17 - vendor/golang.org/x/net/http2/not_go111.go | 21 - vendor/golang.org/x/net/http2/not_go115.go | 31 - vendor/golang.org/x/net/http2/not_go118.go | 17 - vendor/golang.org/x/net/http2/server.go | 24 +- vendor/golang.org/x/net/http2/transport.go | 33 +- vendor/golang.org/x/net/idna/go118.go | 1 - vendor/golang.org/x/net/idna/idna10.0.0.go | 1 - vendor/golang.org/x/net/idna/idna9.0.0.go | 1 - vendor/golang.org/x/net/idna/pre_go118.go | 1 - vendor/golang.org/x/net/idna/tables10.0.0.go | 1 - vendor/golang.org/x/net/idna/tables11.0.0.go | 1 - vendor/golang.org/x/net/idna/tables12.0.0.go | 1 - vendor/golang.org/x/net/idna/tables13.0.0.go | 1 - vendor/golang.org/x/net/idna/tables15.0.0.go | 1 - vendor/golang.org/x/net/idna/tables9.0.0.go | 1 - vendor/golang.org/x/net/idna/trie12.0.0.go | 1 - vendor/golang.org/x/net/idna/trie13.0.0.go | 1 - .../x/net/internal/socket/cmsghdr.go | 1 - .../x/net/internal/socket/cmsghdr_bsd.go | 1 - .../internal/socket/cmsghdr_linux_32bit.go | 2 - .../internal/socket/cmsghdr_linux_64bit.go | 2 - .../internal/socket/cmsghdr_solaris_64bit.go | 1 - .../x/net/internal/socket/cmsghdr_stub.go | 1 - .../x/net/internal/socket/cmsghdr_unix.go | 1 - .../net/internal/socket/complete_dontwait.go | 1 - .../internal/socket/complete_nodontwait.go | 1 - .../golang.org/x/net/internal/socket/empty.s | 1 - .../x/net/internal/socket/error_unix.go | 1 - .../x/net/internal/socket/iovec_32bit.go | 2 - .../x/net/internal/socket/iovec_64bit.go | 2 - .../internal/socket/iovec_solaris_64bit.go | 1 - .../x/net/internal/socket/iovec_stub.go | 1 - .../x/net/internal/socket/mmsghdr_stub.go | 1 - .../x/net/internal/socket/mmsghdr_unix.go | 1 - .../x/net/internal/socket/msghdr_bsd.go | 1 - .../x/net/internal/socket/msghdr_bsdvar.go | 1 - .../net/internal/socket/msghdr_linux_32bit.go | 2 - .../net/internal/socket/msghdr_linux_64bit.go | 2 - .../internal/socket/msghdr_solaris_64bit.go | 1 - .../x/net/internal/socket/msghdr_stub.go | 1 - .../x/net/internal/socket/msghdr_zos_s390x.go | 1 - .../x/net/internal/socket/norace.go | 1 - .../golang.org/x/net/internal/socket/race.go | 1 - .../x/net/internal/socket/rawconn_mmsg.go | 1 - .../x/net/internal/socket/rawconn_msg.go | 1 - .../x/net/internal/socket/rawconn_nommsg.go | 1 - .../x/net/internal/socket/rawconn_nomsg.go | 1 - .../x/net/internal/socket/sys_bsd.go | 1 - .../x/net/internal/socket/sys_const_unix.go | 1 - .../x/net/internal/socket/sys_linux.go | 1 - .../net/internal/socket/sys_linux_loong64.go | 1 - 
.../net/internal/socket/sys_linux_riscv64.go | 1 - .../x/net/internal/socket/sys_posix.go | 1 - .../x/net/internal/socket/sys_stub.go | 1 - .../x/net/internal/socket/sys_unix.go | 1 - .../x/net/internal/socket/zsys_aix_ppc64.go | 1 - .../net/internal/socket/zsys_linux_loong64.go | 1 - .../net/internal/socket/zsys_linux_riscv64.go | 1 - vendor/golang.org/x/net/ipv4/control_bsd.go | 1 - .../golang.org/x/net/ipv4/control_pktinfo.go | 1 - vendor/golang.org/x/net/ipv4/control_stub.go | 1 - vendor/golang.org/x/net/ipv4/control_unix.go | 1 - vendor/golang.org/x/net/ipv4/icmp_stub.go | 1 - vendor/golang.org/x/net/ipv4/payload_cmsg.go | 1 - .../golang.org/x/net/ipv4/payload_nocmsg.go | 1 - vendor/golang.org/x/net/ipv4/sockopt_posix.go | 1 - vendor/golang.org/x/net/ipv4/sockopt_stub.go | 1 - vendor/golang.org/x/net/ipv4/sys_aix.go | 1 - vendor/golang.org/x/net/ipv4/sys_asmreq.go | 1 - .../golang.org/x/net/ipv4/sys_asmreq_stub.go | 1 - vendor/golang.org/x/net/ipv4/sys_asmreqn.go | 1 - .../golang.org/x/net/ipv4/sys_asmreqn_stub.go | 1 - vendor/golang.org/x/net/ipv4/sys_bpf.go | 1 - vendor/golang.org/x/net/ipv4/sys_bpf_stub.go | 1 - vendor/golang.org/x/net/ipv4/sys_bsd.go | 1 - vendor/golang.org/x/net/ipv4/sys_ssmreq.go | 1 - .../golang.org/x/net/ipv4/sys_ssmreq_stub.go | 1 - vendor/golang.org/x/net/ipv4/sys_stub.go | 1 - .../golang.org/x/net/ipv4/zsys_aix_ppc64.go | 1 - .../x/net/ipv4/zsys_linux_loong64.go | 1 - .../x/net/ipv4/zsys_linux_riscv64.go | 1 - .../x/net/ipv6/control_rfc2292_unix.go | 1 - .../x/net/ipv6/control_rfc3542_unix.go | 1 - vendor/golang.org/x/net/ipv6/control_stub.go | 1 - vendor/golang.org/x/net/ipv6/control_unix.go | 1 - vendor/golang.org/x/net/ipv6/icmp_bsd.go | 1 - vendor/golang.org/x/net/ipv6/icmp_stub.go | 1 - vendor/golang.org/x/net/ipv6/payload_cmsg.go | 1 - .../golang.org/x/net/ipv6/payload_nocmsg.go | 1 - vendor/golang.org/x/net/ipv6/sockopt_posix.go | 1 - vendor/golang.org/x/net/ipv6/sockopt_stub.go | 1 - vendor/golang.org/x/net/ipv6/sys_aix.go | 1 - vendor/golang.org/x/net/ipv6/sys_asmreq.go | 1 - .../golang.org/x/net/ipv6/sys_asmreq_stub.go | 1 - vendor/golang.org/x/net/ipv6/sys_bpf.go | 1 - vendor/golang.org/x/net/ipv6/sys_bpf_stub.go | 1 - vendor/golang.org/x/net/ipv6/sys_bsd.go | 1 - vendor/golang.org/x/net/ipv6/sys_ssmreq.go | 1 - .../golang.org/x/net/ipv6/sys_ssmreq_stub.go | 1 - vendor/golang.org/x/net/ipv6/sys_stub.go | 1 - .../golang.org/x/net/ipv6/zsys_aix_ppc64.go | 1 - .../x/net/ipv6/zsys_linux_loong64.go | 1 - .../x/net/ipv6/zsys_linux_riscv64.go | 1 - .../clientcredentials/clientcredentials.go | 6 +- vendor/golang.org/x/oauth2/deviceauth.go | 198 + vendor/golang.org/x/oauth2/google/default.go | 31 +- vendor/golang.org/x/oauth2/google/doc.go | 2 + vendor/golang.org/x/oauth2/google/google.go | 46 +- .../google/internal/externalaccount/aws.go | 47 +- .../externalaccount/basecredentials.go | 45 +- .../externalaccount/executablecredsource.go | 4 + .../externalaccount/filecredsource.go | 4 + .../google/internal/externalaccount/header.go | 64 + .../internal/externalaccount/urlcredsource.go | 4 + .../externalaccountauthorizeduser.go | 114 + .../clientauth.go | 8 +- .../sts_exchange.go | 42 +- vendor/golang.org/x/oauth2/internal/token.go | 70 +- vendor/golang.org/x/oauth2/oauth2.go | 33 +- vendor/golang.org/x/oauth2/pkce.go | 68 + vendor/golang.org/x/oauth2/token.go | 2 +- vendor/golang.org/x/sync/errgroup/go120.go | 1 - .../golang.org/x/sync/errgroup/pre_go120.go | 1 - .../x/sync/singleflight/singleflight.go | 9 + vendor/golang.org/x/sys/unix/mkerrors.sh | 39 +- 
vendor/golang.org/x/sys/unix/zerrors_linux.go | 90 +- .../x/sys/unix/zerrors_linux_386.go | 3 + .../x/sys/unix/zerrors_linux_amd64.go | 3 + .../x/sys/unix/zerrors_linux_arm.go | 3 + .../x/sys/unix/zerrors_linux_arm64.go | 3 + .../x/sys/unix/zerrors_linux_loong64.go | 3 + .../x/sys/unix/zerrors_linux_mips.go | 3 + .../x/sys/unix/zerrors_linux_mips64.go | 3 + .../x/sys/unix/zerrors_linux_mips64le.go | 3 + .../x/sys/unix/zerrors_linux_mipsle.go | 3 + .../x/sys/unix/zerrors_linux_ppc.go | 3 + .../x/sys/unix/zerrors_linux_ppc64.go | 3 + .../x/sys/unix/zerrors_linux_ppc64le.go | 3 + .../x/sys/unix/zerrors_linux_riscv64.go | 3 + .../x/sys/unix/zerrors_linux_s390x.go | 3 + .../x/sys/unix/zerrors_linux_sparc64.go | 3 + .../x/sys/unix/zsyscall_openbsd_386.go | 2 - .../x/sys/unix/zsyscall_openbsd_amd64.go | 2 - .../x/sys/unix/zsyscall_openbsd_arm.go | 2 - .../x/sys/unix/zsyscall_openbsd_arm64.go | 2 - .../x/sys/unix/zsyscall_openbsd_mips64.go | 2 - .../x/sys/unix/zsyscall_openbsd_ppc64.go | 2 - .../x/sys/unix/zsyscall_openbsd_riscv64.go | 2 - .../x/sys/unix/zsysnum_linux_386.go | 4 + .../x/sys/unix/zsysnum_linux_amd64.go | 3 + .../x/sys/unix/zsysnum_linux_arm.go | 4 + .../x/sys/unix/zsysnum_linux_arm64.go | 4 + .../x/sys/unix/zsysnum_linux_loong64.go | 4 + .../x/sys/unix/zsysnum_linux_mips.go | 4 + .../x/sys/unix/zsysnum_linux_mips64.go | 4 + .../x/sys/unix/zsysnum_linux_mips64le.go | 4 + .../x/sys/unix/zsysnum_linux_mipsle.go | 4 + .../x/sys/unix/zsysnum_linux_ppc.go | 4 + .../x/sys/unix/zsysnum_linux_ppc64.go | 4 + .../x/sys/unix/zsysnum_linux_ppc64le.go | 4 + .../x/sys/unix/zsysnum_linux_riscv64.go | 4 + .../x/sys/unix/zsysnum_linux_s390x.go | 4 + .../x/sys/unix/zsysnum_linux_sparc64.go | 4 + vendor/golang.org/x/sys/unix/ztypes_linux.go | 125 +- .../golang.org/x/sys/windows/env_windows.go | 17 +- .../x/sys/windows/syscall_windows.go | 4 +- .../x/sys/windows/zsyscall_windows.go | 9 + .../v1/cloudresourcemanager-api.json | 12 +- .../v1/cloudresourcemanager-gen.go | 58 +- .../api/compute/v1/compute-api.json | 2898 +++- .../api/compute/v1/compute-gen.go | 13543 ++++++++++++---- .../iamcredentials/v1/iamcredentials-gen.go | 25 +- vendor/google.golang.org/api/internal/cba.go | 40 +- .../google.golang.org/api/internal/creds.go | 5 +- vendor/google.golang.org/api/internal/s2a.go | 2 +- .../api/internal/settings.go | 17 + .../google.golang.org/api/internal/version.go | 2 +- .../option/internaloption/internaloption.go | 13 + .../api/storage/v1/storage-api.json | 1244 +- .../api/storage/v1/storage-gen.go | 13482 +++++++++------ .../api/transport/grpc/dial.go | 35 +- .../api/transport/grpc/dial_appengine.go | 32 - .../api/transport/http/dial.go | 17 +- .../api/transport/http/dial_appengine.go | 21 - .../google.golang.org/appengine/.travis.yml | 18 - .../appengine/CONTRIBUTING.md | 6 +- vendor/google.golang.org/appengine/README.md | 6 +- .../google.golang.org/appengine/appengine.go | 23 +- .../appengine/appengine_vm.go | 12 +- .../google.golang.org/appengine/identity.go | 3 +- .../appengine/internal/api.go | 347 +- .../appengine/internal/api_classic.go | 29 +- .../appengine/internal/api_common.go | 50 +- .../appengine/internal/identity.go | 7 +- .../appengine/internal/identity_classic.go | 23 +- .../appengine/internal/identity_flex.go | 1 + .../appengine/internal/identity_vm.go | 20 +- .../appengine/internal/main.go | 1 + .../appengine/internal/main_vm.go | 3 +- .../internal/socket/socket_service.pb.go | 2822 ---- .../internal/socket/socket_service.proto | 460 - .../appengine/internal/transaction.go | 10 +- 
.../google.golang.org/appengine/namespace.go | 3 +- .../google.golang.org/appengine/socket/doc.go | 10 - .../appengine/socket/socket_classic.go | 290 - .../appengine/socket/socket_vm.go | 64 - vendor/google.golang.org/appengine/timeout.go | 2 +- .../appengine/travis_install.sh | 18 - .../appengine/travis_test.sh | 12 - .../appengine/urlfetch/urlfetch.go | 9 +- .../api/annotations/field_info.pb.go | 295 + .../admin/v2/bigtable_instance_admin.pb.go | 196 +- .../googleapis/bigtable/admin/v2/common.pb.go | 4 +- .../bigtable/admin/v2/instance.pb.go | 469 +- vendor/modules.txt | 93 +- 538 files changed, 47742 insertions(+), 23431 deletions(-) create mode 100644 vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary.go delete mode 100644 vendor/cloud.google.com/go/storage/internal/apiv2/metadata.go rename vendor/cloud.google.com/go/storage/internal/apiv2/{stubs => storagepb}/storage.pb.go (66%) create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_type.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy/policy.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/pipeline.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_bearer_token.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_register_rp.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_trace_namespace.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/runtime.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake/fake.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_trace.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_key_credential.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_sas_credential.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_dialer_other.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_dialer_wasm.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/authentication_record.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/developer_credential_util.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/exported.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/internal.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/syncer.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-pre.ps1 create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources.bicep create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/assets.json create mode 100644 
vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/batch_builder.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/blob_batch.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/log_events.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/set_expiry.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/transfer_validation_option.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/build.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/constants.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/models.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_options.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/buffer_manager.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/challenge_policy.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/mmf_unix.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/mmf_windows.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/transfer_manager.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/log.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/batch_builder.go delete mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/test_serialized_cache.json create mode 100644 vendor/github.com/golang-jwt/jwt/v5/.gitignore create mode 100644 vendor/github.com/golang-jwt/jwt/v5/LICENSE create mode 100644 vendor/github.com/golang-jwt/jwt/v5/MIGRATION_GUIDE.md create mode 100644 vendor/github.com/golang-jwt/jwt/v5/README.md create mode 100644 vendor/github.com/golang-jwt/jwt/v5/SECURITY.md create mode 100644 vendor/github.com/golang-jwt/jwt/v5/VERSION_HISTORY.md create mode 100644 vendor/github.com/golang-jwt/jwt/v5/claims.go create mode 100644 vendor/github.com/golang-jwt/jwt/v5/doc.go create mode 100644 vendor/github.com/golang-jwt/jwt/v5/ecdsa.go create mode 100644 vendor/github.com/golang-jwt/jwt/v5/ecdsa_utils.go create mode 100644 vendor/github.com/golang-jwt/jwt/v5/ed25519.go create mode 100644 vendor/github.com/golang-jwt/jwt/v5/ed25519_utils.go create mode 100644 vendor/github.com/golang-jwt/jwt/v5/errors.go create mode 100644 vendor/github.com/golang-jwt/jwt/v5/errors_go1_20.go create mode 100644 vendor/github.com/golang-jwt/jwt/v5/errors_go_other.go create mode 100644 vendor/github.com/golang-jwt/jwt/v5/hmac.go create mode 100644 vendor/github.com/golang-jwt/jwt/v5/map_claims.go create mode 100644 vendor/github.com/golang-jwt/jwt/v5/none.go create mode 100644 vendor/github.com/golang-jwt/jwt/v5/parser.go create mode 100644 vendor/github.com/golang-jwt/jwt/v5/parser_option.go create mode 100644 vendor/github.com/golang-jwt/jwt/v5/registered_claims.go create mode 100644 vendor/github.com/golang-jwt/jwt/v5/rsa.go create mode 100644 vendor/github.com/golang-jwt/jwt/v5/rsa_pss.go create mode 100644 vendor/github.com/golang-jwt/jwt/v5/rsa_utils.go create mode 100644 
vendor/github.com/golang-jwt/jwt/v5/signing_method.go create mode 100644 vendor/github.com/golang-jwt/jwt/v5/staticcheck.conf create mode 100644 vendor/github.com/golang-jwt/jwt/v5/token.go create mode 100644 vendor/github.com/golang-jwt/jwt/v5/token_option.go create mode 100644 vendor/github.com/golang-jwt/jwt/v5/types.go create mode 100644 vendor/github.com/golang-jwt/jwt/v5/validator.go rename vendor/github.com/google/go-cmp/cmp/{export_unsafe.go => export.go} (94%) delete mode 100644 vendor/github.com/google/go-cmp/cmp/export_panic.go rename vendor/github.com/google/go-cmp/cmp/internal/value/{pointer_unsafe.go => pointer.go} (95%) delete mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go create mode 100644 vendor/github.com/google/s2a-go/retry/retry.go create mode 100644 vendor/github.com/google/s2a-go/testdata/mds_client_cert.pem create mode 100644 vendor/github.com/google/s2a-go/testdata/mds_client_key.pem create mode 100644 vendor/github.com/google/s2a-go/testdata/mds_root_cert.pem create mode 100644 vendor/github.com/google/s2a-go/testdata/mds_server_cert.pem create mode 100644 vendor/github.com/google/s2a-go/testdata/mds_server_key.pem create mode 100644 vendor/github.com/google/s2a-go/testdata/self_signed_cert.pem create mode 100644 vendor/github.com/google/s2a-go/testdata/self_signed_key.pem create mode 100644 vendor/github.com/google/uuid/version6.go create mode 100644 vendor/github.com/google/uuid/version7.go delete mode 100644 vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go delete mode 100644 vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go delete mode 100644 vendor/golang.org/x/net/http2/go111.go delete mode 100644 vendor/golang.org/x/net/http2/go115.go delete mode 100644 vendor/golang.org/x/net/http2/go118.go delete mode 100644 vendor/golang.org/x/net/http2/not_go111.go delete mode 100644 vendor/golang.org/x/net/http2/not_go115.go delete mode 100644 vendor/golang.org/x/net/http2/not_go118.go create mode 100644 vendor/golang.org/x/oauth2/deviceauth.go create mode 100644 vendor/golang.org/x/oauth2/google/internal/externalaccount/header.go create mode 100644 vendor/golang.org/x/oauth2/google/internal/externalaccountauthorizeduser/externalaccountauthorizeduser.go rename vendor/golang.org/x/oauth2/google/internal/{externalaccount => stsexchange}/clientauth.go (88%) rename vendor/golang.org/x/oauth2/google/internal/{externalaccount => stsexchange}/sts_exchange.go (68%) create mode 100644 vendor/golang.org/x/oauth2/pkce.go delete mode 100644 vendor/google.golang.org/api/transport/grpc/dial_appengine.go delete mode 100644 vendor/google.golang.org/api/transport/http/dial_appengine.go delete mode 100644 vendor/google.golang.org/appengine/.travis.yml delete mode 100644 vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go delete mode 100644 vendor/google.golang.org/appengine/internal/socket/socket_service.proto delete mode 100644 vendor/google.golang.org/appengine/socket/doc.go delete mode 100644 vendor/google.golang.org/appengine/socket/socket_classic.go delete mode 100644 vendor/google.golang.org/appengine/socket/socket_vm.go delete mode 100644 vendor/google.golang.org/appengine/travis_install.sh delete mode 100644 vendor/google.golang.org/appengine/travis_test.sh create mode 100644 vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go diff --git a/docs/sources/configure/_index.md b/docs/sources/configure/_index.md index ccdf08bd974b6..aa03126166077 100644 --- 
a/docs/sources/configure/_index.md +++ b/docs/sources/configure/_index.md @@ -1067,11 +1067,32 @@ storage: [max_connections_per_host: ] + [ca_file: | default = ""] + gcs: [bucket_name: | default = ""] service_account: + http: + [idle_conn_timeout: ] + + [response_header_timeout: ] + + [insecure_skip_verify: ] + + [tls_handshake_timeout: ] + + [expect_continue_timeout: ] + + [max_idle_connections: ] + + [max_idle_connections_per_host: ] + + [max_connections_per_host: ] + + [ca_file: | default = ""] + azure: [account_name: | default = ""] @@ -1102,6 +1123,8 @@ storage: [max_connections_per_host: ] + [ca_file: | default = ""] + swift: [auth_version: ] @@ -2493,16 +2516,16 @@ objstore_config: http: # The time an idle connection will remain idle before closing. - # CLI flag: -thanos.s3.http.idle-conn-timeout + # CLI flag: -thanos.s3.idle-conn-timeout [idle_conn_timeout: | default = 1m30s] # The amount of time the client will wait for a servers response headers. - # CLI flag: -thanos.s3.http.response-header-timeout + # CLI flag: -thanos.s3.response-header-timeout [response_header_timeout: | default = 2m] # If the client connects via HTTPS and this option is enabled, the client # will accept any certificate and hostname. - # CLI flag: -thanos.s3.http.insecure-skip-verify + # CLI flag: -thanos.s3.insecure-skip-verify [insecure_skip_verify: | default = false] # Maximum time to wait for a TLS handshake. 0 means no limit. @@ -2529,6 +2552,11 @@ objstore_config: # CLI flag: -thanos.s3.max-connections-per-host [max_connections_per_host: | default = 0] + # Path to the trusted CA file that signed the SSL certificate of the + # object storage endpoint. + # CLI flag: -thanos.s3.ca-file + [ca_file: | default = ""] + gcs: # GCS bucket name # CLI flag: -thanos.gcs.bucket-name @@ -2540,6 +2568,49 @@ objstore_config: # CLI flag: -thanos.gcs.service-account [service_account: | default = ""] + http: + # The time an idle connection will remain idle before closing. + # CLI flag: -thanos.s3.http.idle-conn-timeout + [idle_conn_timeout: | default = 1m30s] + + # The amount of time the client will wait for a servers response headers. + # CLI flag: -thanos.s3.http.response-header-timeout + [response_header_timeout: | default = 2m] + + # If the client connects via HTTPS and this option is enabled, the client + # will accept any certificate and hostname. + # CLI flag: -thanos.s3.http.insecure-skip-verify + [insecure_skip_verify: | default = false] + + # Maximum time to wait for a TLS handshake. 0 means no limit. + # CLI flag: -thanos.s3.http.tls-handshake-timeout + [tls_handshake_timeout: | default = 10s] + + # The time to wait for a server's first response headers after fully + # writing the request headers if the request has an Expect header. 0 to + # send the request body immediately. + # CLI flag: -thanos.s3.http.expect-continue-timeout + [expect_continue_timeout: | default = 1s] + + # Maximum number of idle (keep-alive) connections across all hosts. 0 + # means no limit. + # CLI flag: -thanos.s3.http.max-idle-connections + [max_idle_connections: | default = 100] + + # Maximum number of idle (keep-alive) connections to keep per-host. If 0, + # a built-in default value is used. + # CLI flag: -thanos.s3.http.max-idle-connections-per-host + [max_idle_connections_per_host: | default = 100] + + # Maximum number of connections per host. 0 means no limit. 
+ # CLI flag: -thanos.s3.http.max-connections-per-host + [max_connections_per_host: | default = 0] + + # Path to the trusted CA file that signed the SSL certificate of the + # object storage endpoint. + # CLI flag: -thanos.s3.http.ca-file + [ca_file: | default = ""] + azure: # Azure storage account name # CLI flag: -thanos.azure.account-name @@ -2571,16 +2642,16 @@ objstore_config: http: # The time an idle connection will remain idle before closing. - # CLI flag: -thanos.azure.http.idle-conn-timeout + # CLI flag: -thanos.azure.idle-conn-timeout [idle_conn_timeout: | default = 1m30s] # The amount of time the client will wait for a servers response headers. - # CLI flag: -thanos.azure.http.response-header-timeout + # CLI flag: -thanos.azure.response-header-timeout [response_header_timeout: | default = 2m] # If the client connects via HTTPS and this option is enabled, the client # will accept any certificate and hostname. - # CLI flag: -thanos.azure.http.insecure-skip-verify + # CLI flag: -thanos.azure.insecure-skip-verify [insecure_skip_verify: | default = false] # Maximum time to wait for a TLS handshake. 0 means no limit. @@ -2607,6 +2678,11 @@ objstore_config: # CLI flag: -thanos.azure.max-connections-per-host [max_connections_per_host: | default = 0] + # Path to the trusted CA file that signed the SSL certificate of the + # object storage endpoint. + # CLI flag: -thanos.azure.ca-file + [ca_file: | default = ""] + swift: # OpenStack Swift authentication API version. 0 to autodetect. # CLI flag: -thanos.swift.auth-version @@ -4313,17 +4389,17 @@ storage: http: # The time an idle connection will remain idle before closing. - # CLI flag: -common.storage.thanos.s3.http.idle-conn-timeout + # CLI flag: -common.storage.thanos.s3.idle-conn-timeout [idle_conn_timeout: | default = 1m30s] # The amount of time the client will wait for a servers response # headers. - # CLI flag: -common.storage.thanos.s3.http.response-header-timeout + # CLI flag: -common.storage.thanos.s3.response-header-timeout [response_header_timeout: | default = 2m] # If the client connects via HTTPS and this option is enabled, the # client will accept any certificate and hostname. - # CLI flag: -common.storage.thanos.s3.http.insecure-skip-verify + # CLI flag: -common.storage.thanos.s3.insecure-skip-verify [insecure_skip_verify: | default = false] # Maximum time to wait for a TLS handshake. 0 means no limit. @@ -4350,6 +4426,11 @@ storage: # CLI flag: -common.storage.thanos.s3.max-connections-per-host [max_connections_per_host: | default = 0] + # Path to the trusted CA file that signed the SSL certificate of the + # object storage endpoint. + # CLI flag: -common.storage.thanos.s3.ca-file + [ca_file: | default = ""] + gcs: # GCS bucket name # CLI flag: -common.storage.thanos.gcs.bucket-name @@ -4361,6 +4442,50 @@ storage: # CLI flag: -common.storage.thanos.gcs.service-account [service_account: | default = ""] + http: + # The time an idle connection will remain idle before closing. + # CLI flag: -common.storage.thanos.s3.http.idle-conn-timeout + [idle_conn_timeout: | default = 1m30s] + + # The amount of time the client will wait for a servers response + # headers. + # CLI flag: -common.storage.thanos.s3.http.response-header-timeout + [response_header_timeout: | default = 2m] + + # If the client connects via HTTPS and this option is enabled, the + # client will accept any certificate and hostname. 
+ # CLI flag: -common.storage.thanos.s3.http.insecure-skip-verify + [insecure_skip_verify: | default = false] + + # Maximum time to wait for a TLS handshake. 0 means no limit. + # CLI flag: -common.storage.thanos.s3.http.tls-handshake-timeout + [tls_handshake_timeout: | default = 10s] + + # The time to wait for a server's first response headers after fully + # writing the request headers if the request has an Expect header. 0 to + # send the request body immediately. + # CLI flag: -common.storage.thanos.s3.http.expect-continue-timeout + [expect_continue_timeout: | default = 1s] + + # Maximum number of idle (keep-alive) connections across all hosts. 0 + # means no limit. + # CLI flag: -common.storage.thanos.s3.http.max-idle-connections + [max_idle_connections: | default = 100] + + # Maximum number of idle (keep-alive) connections to keep per-host. If + # 0, a built-in default value is used. + # CLI flag: -common.storage.thanos.s3.http.max-idle-connections-per-host + [max_idle_connections_per_host: | default = 100] + + # Maximum number of connections per host. 0 means no limit. + # CLI flag: -common.storage.thanos.s3.http.max-connections-per-host + [max_connections_per_host: | default = 0] + + # Path to the trusted CA file that signed the SSL certificate of the + # object storage endpoint. + # CLI flag: -common.storage.thanos.s3.http.ca-file + [ca_file: | default = ""] + azure: # Azure storage account name # CLI flag: -common.storage.thanos.azure.account-name @@ -4392,17 +4517,17 @@ storage: http: # The time an idle connection will remain idle before closing. - # CLI flag: -common.storage.thanos.azure.http.idle-conn-timeout + # CLI flag: -common.storage.thanos.azure.idle-conn-timeout [idle_conn_timeout: | default = 1m30s] # The amount of time the client will wait for a servers response # headers. - # CLI flag: -common.storage.thanos.azure.http.response-header-timeout + # CLI flag: -common.storage.thanos.azure.response-header-timeout [response_header_timeout: | default = 2m] # If the client connects via HTTPS and this option is enabled, the # client will accept any certificate and hostname. - # CLI flag: -common.storage.thanos.azure.http.insecure-skip-verify + # CLI flag: -common.storage.thanos.azure.insecure-skip-verify [insecure_skip_verify: | default = false] # Maximum time to wait for a TLS handshake. 0 means no limit. @@ -4429,6 +4554,11 @@ storage: # CLI flag: -common.storage.thanos.azure.max-connections-per-host [max_connections_per_host: | default = 0] + # Path to the trusted CA file that signed the SSL certificate of the + # object storage endpoint. + # CLI flag: -common.storage.thanos.azure.ca-file + [ca_file: | default = ""] + swift: # OpenStack Swift authentication API version. 0 to autodetect. 
# CLI flag: -common.storage.thanos.swift.auth-version diff --git a/go.mod b/go.mod index 5b4c32ddb214b..90278ec82f1e0 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ toolchain go1.21.3 require ( cloud.google.com/go/bigtable v1.18.1 cloud.google.com/go/pubsub v1.33.0 - cloud.google.com/go/storage v1.30.1 + cloud.google.com/go/storage v1.35.1 github.com/Azure/azure-pipeline-go v0.2.3 github.com/Azure/azure-storage-blob-go v0.14.0 github.com/Azure/go-autorest/autorest/adal v0.9.23 @@ -44,9 +44,9 @@ require ( github.com/gogo/status v1.1.1 github.com/golang/protobuf v1.5.3 github.com/golang/snappy v0.0.4 - github.com/google/go-cmp v0.5.9 + github.com/google/go-cmp v0.6.0 github.com/google/renameio/v2 v2.0.0 - github.com/google/uuid v1.3.1 + github.com/google/uuid v1.6.0 github.com/gorilla/mux v1.8.0 github.com/gorilla/websocket v1.5.0 github.com/grafana/cloudflare-go v0.0.0-20230110200409-c627cf6792f2 @@ -83,8 +83,8 @@ require ( // github.com/pierrec/lz4 v2.0.5+incompatible github.com/pierrec/lz4/v4 v4.1.18 github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.16.0 - github.com/prometheus/client_model v0.4.0 + github.com/prometheus/client_golang v1.17.0 + github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 github.com/prometheus/common v0.44.0 github.com/prometheus/prometheus v0.47.2-0.20231010075449-4b9c19fe5510 github.com/segmentio/fasthash v1.0.3 @@ -99,12 +99,12 @@ require ( go.etcd.io/bbolt v1.3.6 go.uber.org/atomic v1.11.0 go.uber.org/goleak v1.2.1 - golang.org/x/crypto v0.17.0 - golang.org/x/net v0.17.0 - golang.org/x/sync v0.3.0 - golang.org/x/sys v0.15.0 - golang.org/x/time v0.3.0 - google.golang.org/api v0.132.0 + golang.org/x/crypto v0.19.0 + golang.org/x/net v0.21.0 + golang.org/x/sync v0.5.0 + golang.org/x/sys v0.17.0 + golang.org/x/time v0.4.0 + google.golang.org/api v0.150.0 google.golang.org/grpc v1.59.0 gopkg.in/alecthomas/kingpin.v2 v2.2.6 gopkg.in/yaml.v2 v2.4.0 @@ -131,12 +131,12 @@ require ( github.com/prometheus/alertmanager v0.26.0 github.com/prometheus/common/sigv4 v0.1.0 github.com/richardartoul/molecule v1.0.0 - github.com/thanos-io/objstore v0.0.0-20230829152104-1b257a36f9a3 + github.com/thanos-io/objstore v0.0.0-20240309075357-e8336a5fd5f3 github.com/willf/bloom v2.0.3+incompatible go.opentelemetry.io/collector/pdata v1.0.0-rcv0015 go4.org/netipx v0.0.0-20230125063823-8449b0a6169f golang.org/x/exp v0.0.0-20230801115018-d63ba01acd4b - golang.org/x/oauth2 v0.11.0 + golang.org/x/oauth2 v0.14.0 golang.org/x/text v0.14.0 google.golang.org/protobuf v1.31.0 k8s.io/apimachinery v0.28.1 @@ -144,17 +144,17 @@ require ( ) require ( - cloud.google.com/go v0.110.7 // indirect - cloud.google.com/go/compute v1.23.0 // indirect + cloud.google.com/go v0.110.10 // indirect + cloud.google.com/go/compute v1.23.3 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect - cloud.google.com/go/iam v1.1.1 // indirect - cloud.google.com/go/longrunning v0.5.1 // indirect + cloud.google.com/go/iam v1.1.5 // indirect + cloud.google.com/go/longrunning v0.5.4 // indirect github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect github.com/Azure/azure-sdk-for-go v65.0.0+incompatible // indirect - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.5.1 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.2 // 
indirect + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 // indirect + github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.0 // indirect github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 // indirect @@ -163,7 +163,7 @@ require ( github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect github.com/Azure/go-autorest/logger v0.2.1 // indirect github.com/Azure/go-autorest/tracing v0.6.0 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 // indirect github.com/Masterminds/goutils v1.1.1 // indirect github.com/Masterminds/semver/v3 v3.2.0 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect @@ -225,14 +225,15 @@ require ( github.com/go-zookeeper/zk v1.0.3 // indirect github.com/gofrs/flock v0.8.1 // indirect github.com/golang-jwt/jwt/v4 v4.5.0 // indirect + github.com/golang-jwt/jwt/v5 v5.2.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/google/btree v1.1.2 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8 // indirect - github.com/google/s2a-go v0.1.4 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.2.5 // indirect + github.com/google/s2a-go v0.1.7 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect github.com/googleapis/gax-go/v2 v2.12.0 // indirect github.com/gophercloud/gophercloud v1.5.0 // indirect github.com/grafana/pyroscope-go/godeltaprof v0.1.6 // indirect @@ -279,10 +280,10 @@ require ( github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.0.2 // indirect github.com/oschwald/maxminddb-golang v1.11.0 // indirect - github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect + github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/exporter-toolkit v0.10.1-0.20230714054209-2f4150c63f97 // indirect - github.com/prometheus/procfs v0.11.0 // indirect + github.com/prometheus/procfs v0.11.1 // indirect github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect github.com/rootless-containers/rootlesskit v1.1.0 // indirect github.com/rs/xid v1.5.0 // indirect @@ -315,13 +316,13 @@ require ( go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.21.0 // indirect golang.org/x/mod v0.12.0 // indirect - golang.org/x/term v0.15.0 // indirect + golang.org/x/term v0.17.0 // indirect golang.org/x/tools v0.11.0 // indirect - golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect - google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230913181813-007df8e322eb // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 // indirect + golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect + google.golang.org/appengine v1.6.8 // indirect + google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 // indirect + 
google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17 // indirect gopkg.in/fsnotify/fsnotify.v1 v1.4.7 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect diff --git a/go.sum b/go.sum index 744c904e823c2..0a50334d17dcc 100644 --- a/go.sum +++ b/go.sum @@ -34,8 +34,8 @@ cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w9 cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= -cloud.google.com/go v0.110.7 h1:rJyC7nWRg2jWGZ4wSJ5nY65GTdYJkg0cd/uXb+ACI6o= -cloud.google.com/go v0.110.7/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= +cloud.google.com/go v0.110.10 h1:LXy9GEO+timppncPIAZoOj3l58LIU9k+kn48AN7IO3Y= +cloud.google.com/go v0.110.10/go.mod h1:v1OoFqYxiBkUrruItNM3eT4lLByNjxmJSV/xDKJNnic= cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= @@ -60,8 +60,8 @@ cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6m cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= -cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= -cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= +cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= @@ -82,14 +82,14 @@ cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFP cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= -cloud.google.com/go/iam v1.1.1 h1:lW7fzj15aVIXYHREOqjRBV9PsH0Z6u8Y46a1YGvQP4Y= -cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= -cloud.google.com/go/kms v1.15.0 h1:xYl5WEaSekKYN5gGRyhjvZKM22GVBBCzegGNVPy+aIs= -cloud.google.com/go/kms v1.15.0/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM= +cloud.google.com/go/iam v1.1.5 h1:1jTsCu4bcsNsE4iiqNT5SHwrDRCfRmIaaaVFhRveTJI= +cloud.google.com/go/iam v1.1.5/go.mod h1:rB6P/Ic3mykPbFio+vo7403drjlgvoWfYpJhMXEbzv8= +cloud.google.com/go/kms v1.15.5 h1:pj1sRfut2eRbD9pFRjNnPNg/CzJPuQAzUujMIM1vVeM= +cloud.google.com/go/kms v1.15.5/go.mod h1:cU2H5jnp6G2TDpUGZyqTCoy1n16fbubHZjmVXSMtwDI= cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= cloud.google.com/go/lifesciences v0.5.0/go.mod 
h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= -cloud.google.com/go/longrunning v0.5.1 h1:Fr7TXftcqTudoyRJa113hyaqlGdiBQkp0Gq7tErFDWI= -cloud.google.com/go/longrunning v0.5.1/go.mod h1:spvimkwdz6SPWKEt/XBij79E9fiTkHSQl/fRUUQJYJc= +cloud.google.com/go/longrunning v0.5.4 h1:w8xEcbZodnA2BbW6sVirkkoC+1gP8wS57EUUgGS0GVg= +cloud.google.com/go/longrunning v0.5.4/go.mod h1:zqNVncI0BOP8ST6XQD1+VcvuShMmq7+xFSzOL++V0dI= cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= @@ -127,8 +127,8 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9 cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= -cloud.google.com/go/storage v1.30.1 h1:uOdMxAs8HExqBlnLtnQyP0YkvbiDpdGShGKtx6U/oNM= -cloud.google.com/go/storage v1.30.1/go.mod h1:NfxhC0UJE1aXSx7CIIbCf7y9HKT7BiccwkR7+P7gN8E= +cloud.google.com/go/storage v1.35.1 h1:B59ahL//eDfx2IIKFBeT5Atm9wnNmj3+8xG/W4WB//w= +cloud.google.com/go/storage v1.35.1/go.mod h1:M6M/3V/D3KpzMTJyPOR/HU6n2Si5QdaXYEsng2xgOs8= cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0= @@ -149,14 +149,16 @@ github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVt github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= github.com/Azure/azure-sdk-for-go v36.2.0+incompatible h1:09cv2WoH0g6jl6m2iT+R9qcIPZKhXEL0sbmLhxP895s= github.com/Azure/azure-sdk-for-go v36.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0 h1:8q4SaHjFsClSvuVne0ID/5Ka8u3fcIHyqkLjcFpNRHQ= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 h1:vcYCAze6p19qBW7MhZybIsqD8sMV8js0NyQM8JDnVtg= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0/go.mod h1:OQeznEEkTZ9OrhHJoDD8ZDq51FHgXjqtP9z6bEwBq9U= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.5.1 h1:BMTdr+ib5ljLa9MxTJK8x/Ds0MbBb4MfuW5BL0zMJnI= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.5.1/go.mod h1:c6WvOhtmjNUWbLfOG1qxM/q0SPvQNSVJvolm+C52dIU= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.2 h1:c4k2FIYIh4xtwqrQwV0Ct1v5+ehlNXj5NI/MWVsiTkQ= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.2/go.mod h1:5FDJtLEO/GxwNgUxbwrY3LP0pEoThTQJtk2oysdXHxM= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 h1:sO0/P7g68FrryJzljemN+6GTssUXdANk6aJ7T1ZxnsQ= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1/go.mod h1:h8hyGFDsU5HMivxiS2iYFZsgDbU9OnnJ163x5UGVKYo= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 h1:LqbJ/WzJUwBf8UiaSzgX7aMclParm9/5Vgp+TY51uBQ= 
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0 h1:AifHbc4mg0x9zW52WOpKbsHaDKuRhlI7TVl47thgQ70= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0/go.mod h1:T5RfihdXtBDxt1Ch2wobif3TvzTdumDy29kahv6AV9A= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.0 h1:IfFdxTUDiV58iZqPKgyWiz4X4fCxZeQ1pTQPImLYXpY= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.0/go.mod h1:SUZc9YRRHfx2+FAQKNDGrssXehqLpxmwRv2mC/5ntj4= github.com/Azure/azure-storage-queue-go v0.0.0-20181215014128-6ed74e755687/go.mod h1:K6am8mT+5iFXgingS9LUc7TmbsW6XBw3nxaRyaMyWc8= github.com/Azure/go-amqp v0.12.6/go.mod h1:qApuH6OFTSKZFmCOxccvAv5rLizBQf4v8pRmG138DPo= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= @@ -225,8 +227,8 @@ github.com/Azure/go-autorest/tracing v0.1.0/go.mod h1:ROEEAFwXycQw7Sn3DXNtEedEvd github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= -github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 h1:OBhqkivkhkMqLPymWEppkm7vgPQY2XsHoEkaMQ0AdZY= -github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 h1:DzHpqpoJVaCgOUdVHxE8QB52S6NiVdDQvGlny1qvPqA= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= @@ -836,6 +838,8 @@ github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzw github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v5 v5.2.0 h1:d/ix8ftRUorsN+5eMIlF4T6J8CAt9rch3My2winC1Jw= +github.com/golang-jwt/jwt/v5 v5.2.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= @@ -902,8 +906,9 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp 
v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-github/v32 v32.1.0/go.mod h1:rIEpZD9CTDQwDK9GDrtMTycQNA4JU3qBsCizh3q2WCI= github.com/google/go-querystring v0.0.0-20170111101155-53e6ce116135/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= @@ -943,20 +948,20 @@ github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8/go.mod h1:Jh3hGz2jkYa github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/renameio/v2 v2.0.0 h1:UifI23ZTGY8Tt29JbYFiuyIU3eX+RNFtUwefq9qAhxg= github.com/google/renameio/v2 v2.0.0/go.mod h1:BtmJXm5YlszgC+TD4HOEEUFgkJP3nLxehU6hfe7jRt4= -github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc= -github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= +github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= +github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= github.com/google/tcpproxy v0.0.0-20180808230851-dfa16c61dad2/go.mod h1:DavVbd41y+b7ukKDmlnPR4nGYmkWXR6vHUkjQNiHPBs= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= -github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= -github.com/googleapis/enterprise-certificate-proxy v0.2.5 h1:UR4rDjcgpgEnqpIEvkiqTYKBCKLNmlge2eVjoZfySzM= -github.com/googleapis/enterprise-certificate-proxy v0.2.5/go.mod h1:RxW0N9901Cko1VOCW3SXCpWP+mlIEkk2tP7jnHy9a3w= +github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= +github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= @@ -1512,8 +1517,8 @@ github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ= github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= -github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= -github.com/pkg/browser 
v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -1544,16 +1549,16 @@ github.com/prometheus/client_golang v1.6.1-0.20200604110148-03575cad4e55/go.mod github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= -github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= +github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= +github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= -github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= +github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 h1:v7DLqVdK4VrYkVD5diGdl4sxJurKJEMnODWRJlxV9oM= +github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= @@ -1582,8 +1587,8 @@ github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+Gx github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.11.0 h1:5EAgkfkMl659uZPbe9AS2N68a7Cc1TJbPEuGzFuRbyk= -github.com/prometheus/procfs v0.11.0/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= +github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= +github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= 
github.com/prometheus/prometheus v0.47.2-0.20231010075449-4b9c19fe5510 h1:6ksZ7t1hNOzGPPs8DK7SvXQf6UfWzi+W5Z7PCBl8gx4= github.com/prometheus/prometheus v0.47.2-0.20231010075449-4b9c19fe5510/go.mod h1:UC0TwJiF90m2T3iYPQBKnGu8gv3s55dF/EgpTq8gyvo= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= @@ -1715,8 +1720,8 @@ github.com/tedsuo/ifrit v0.0.0-20191009134036-9a97d0632f00/go.mod h1:eyZnKCc955u github.com/tencentcloud/tencentcloud-sdk-go v1.0.162/go.mod h1:asUz5BPXxgoPGaRgZaVm1iGcUAuHyYUo1nXqKa83cvI= github.com/tencentyun/cos-go-sdk-v5 v0.7.40 h1:W6vDGKCHe4wBACI1d2UgE6+50sJFhRWU4O8IB2ozzxM= github.com/tencentyun/cos-go-sdk-v5 v0.7.40/go.mod h1:4dCEtLHGh8QPxHEkgq+nFaky7yZxQuYwgSJM87icDaw= -github.com/thanos-io/objstore v0.0.0-20230829152104-1b257a36f9a3 h1:avZFY25vRM35FggTBQj2WXq45yEvIKbDLUcNDrJLfKU= -github.com/thanos-io/objstore v0.0.0-20230829152104-1b257a36f9a3/go.mod h1:oJ82xgcBDzGJrEgUsjlTj6n01+ZWUMMUR8BlZzX5xDE= +github.com/thanos-io/objstore v0.0.0-20240309075357-e8336a5fd5f3 h1:Q0BjHI7FMe5KkKVXBFYto5VNASxiA/+AEhHup/IT7N0= +github.com/thanos-io/objstore v0.0.0-20240309075357-e8336a5fd5f3/go.mod h1:ptMYNPgbyAR7a2Ab2t7zHA2/0be2ePyawVR7lp7fZtg= github.com/tidwall/gjson v1.6.0/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls= github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= @@ -1900,14 +1905,13 @@ golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWP golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= -golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -2036,8 +2040,8 @@ golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.17.0 
h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= -golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -2062,8 +2066,8 @@ golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7Lm golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= -golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= +golang.org/x/oauth2 v0.14.0 h1:P0Vrf/2538nmC0H+pEQ3MNFRRnVR7RlqyVw+bvm26z0= +golang.org/x/oauth2 v0.14.0/go.mod h1:lAtNWgaWfL4cm7j2OV8TxGi9Qb7ECORx8DktCY74OwM= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -2079,8 +2083,8 @@ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= -golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= +golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180622082034-63fc586f45fe/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -2175,7 +2179,6 @@ golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -2207,8 +2210,8 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -2217,8 +2220,8 @@ golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= -golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= -golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= +golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2245,8 +2248,8 @@ golang.org/x/time v0.0.0-20190921001708-c4c64cad1fd0/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= -golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.4.0 h1:Z81tqI5ddIoXDPvVQ7/7CC9TnLM7ubaFG2qXYd5BbYY= +golang.org/x/time v0.4.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -2336,8 +2339,8 @@ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 
h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= +golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= golang.zx2c4.com/wireguard v0.0.20200121/go.mod h1:P2HsVp8SKwZEufsnezXZA4GRX/T49/HlU7DGuelXsU4= golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4/go.mod h1:UdS9frhv65KTfwxME1xE8+rHYoFpbm36gOud1GhBe9c= gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= @@ -2394,8 +2397,8 @@ google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOI google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= -google.golang.org/api v0.132.0 h1:8t2/+qZ26kAOGSmOiHwVycqVaDg7q3JDILrNi/Z6rvc= -google.golang.org/api v0.132.0/go.mod h1:AeTBC6GpJnJSRJjktDcPX0QwtS8pGYZOV6MSuSCusw0= +google.golang.org/api v0.150.0 h1:Z9k22qD289SZ8gCJrk4DrWXkNjtfvKAUo/l1ma8eBYE= +google.golang.org/api v0.150.0/go.mod h1:ccy+MJ6nrYFgE3WgRx/AMXOxOmU8Q4hSa+jjibzhxcg= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -2403,8 +2406,9 @@ google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20180518175338-11a468237815/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= @@ -2509,12 +2513,12 @@ google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+S google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= google.golang.org/genproto v0.0.0-20220921223823-23cae91e6737/go.mod h1:2r/26NEF3bFmT3eC3aZreahSal0C3Shl8Gi6vyDYqOQ= -google.golang.org/genproto v0.0.0-20230913181813-007df8e322eb h1:XFBgcDwm7irdHTbz4Zk2h7Mh+eis4nfJEFQFYzJzuIA= -google.golang.org/genproto v0.0.0-20230913181813-007df8e322eb/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= -google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d h1:DoPTO70H+bcDXcd39vOqb2viZxgqeBeSGtZ55yZU4/Q= 
-google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 h1:N3bU/SQDCDyD6R528GJ/PwW9KjYcJA3dgyH+MovAkIM=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:KSqppvjFjtoCI+KGd4PELB0qLNxdJHRGqRI09mB6pQA=
+google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 h1:wpZ8pe2x1Q3f2KyT5f8oP/fa9rHAKgFPr/HZdNuS+PQ=
+google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:J7XzRzVy1+IPwWHZUzoD0IccYZIrXILAQpc+Qy9CMhY=
+google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 h1:JpwMPBpFN3uKhdaekDpiNlImDdkUAyiJ6ez/uxGaUSo=
+google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:0xJLfVdJqpAPl8tDg1ujOCGzx6LFLttXT5NhllGOXY4=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17 h1:Jyp0Hsi0bmHXG6k9eATXoYtjd6e2UzZ1SCn/wIupY14=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:oQ5rr10WTTMvP4A36n8JpR1OrO1BEiV4f78CneXZxkA=
 google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
 google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
 google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
diff --git a/pkg/storage/bucket/gcs/config.go b/pkg/storage/bucket/gcs/config.go
index 1e212352281eb..a3446e6fffcba 100644
--- a/pkg/storage/bucket/gcs/config.go
+++ b/pkg/storage/bucket/gcs/config.go
@@ -2,14 +2,27 @@ package gcs
 
 import (
 	"flag"
+	"net/http"
 
 	"github.com/grafana/dskit/flagext"
+
+	bucket_http "github.com/grafana/loki/pkg/storage/bucket/http"
 )
 
+// HTTPConfig stores the http.Transport configuration for the GCS client.
+type HTTPConfig struct {
+	bucket_http.Config `yaml:",inline"`
+
+	// Allow upstream callers to inject a round tripper
+	Transport http.RoundTripper `yaml:"-"`
+}
+
 // Config holds the config options for GCS backend
 type Config struct {
 	BucketName     string         `yaml:"bucket_name"`
 	ServiceAccount flagext.Secret `yaml:"service_account"`
+
+	HTTP HTTPConfig `yaml:"http"`
 }
 
 // RegisterFlags registers the flags for GCS storage
@@ -21,4 +34,5 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
 func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
 	f.StringVar(&cfg.BucketName, prefix+"gcs.bucket-name", "", "GCS bucket name")
 	f.Var(&cfg.ServiceAccount, prefix+"gcs.service-account", "JSON representing either a Google Developers Console client_credentials.json file or a Google Developers service account key file. If empty, fallback to Google default logic.")
+	cfg.HTTP.RegisterFlagsWithPrefix(prefix+"gcs.http.", f)
 }
diff --git a/pkg/storage/bucket/http/config.go b/pkg/storage/bucket/http/config.go
index 1c83e1f311bbb..509de0bf301f1 100644
--- a/pkg/storage/bucket/http/config.go
+++ b/pkg/storage/bucket/http/config.go
@@ -15,6 +15,7 @@ type Config struct {
 	MaxIdleConns          int           `yaml:"max_idle_connections"`
 	MaxIdleConnsPerHost   int           `yaml:"max_idle_connections_per_host"`
 	MaxConnsPerHost       int           `yaml:"max_connections_per_host"`
+	CAFile                string        `yaml:"ca_file"`
 }
 
 // RegisterFlags registers the flags for the storage HTTP client.
@@ -24,12 +25,13 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
 
 // RegisterFlagsWithPrefix registers the flags for the storage HTTP client with the provided prefix.
func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { - f.DurationVar(&cfg.IdleConnTimeout, prefix+"http.idle-conn-timeout", 90*time.Second, "The time an idle connection will remain idle before closing.") - f.DurationVar(&cfg.ResponseHeaderTimeout, prefix+"http.response-header-timeout", 2*time.Minute, "The amount of time the client will wait for a servers response headers.") - f.BoolVar(&cfg.InsecureSkipVerify, prefix+"http.insecure-skip-verify", false, "If the client connects via HTTPS and this option is enabled, the client will accept any certificate and hostname.") + f.DurationVar(&cfg.IdleConnTimeout, prefix+"idle-conn-timeout", 90*time.Second, "The time an idle connection will remain idle before closing.") + f.DurationVar(&cfg.ResponseHeaderTimeout, prefix+"response-header-timeout", 2*time.Minute, "The amount of time the client will wait for a servers response headers.") + f.BoolVar(&cfg.InsecureSkipVerify, prefix+"insecure-skip-verify", false, "If the client connects via HTTPS and this option is enabled, the client will accept any certificate and hostname.") f.DurationVar(&cfg.TLSHandshakeTimeout, prefix+"tls-handshake-timeout", 10*time.Second, "Maximum time to wait for a TLS handshake. 0 means no limit.") f.DurationVar(&cfg.ExpectContinueTimeout, prefix+"expect-continue-timeout", 1*time.Second, "The time to wait for a server's first response headers after fully writing the request headers if the request has an Expect header. 0 to send the request body immediately.") f.IntVar(&cfg.MaxIdleConns, prefix+"max-idle-connections", 100, "Maximum number of idle (keep-alive) connections across all hosts. 0 means no limit.") f.IntVar(&cfg.MaxIdleConnsPerHost, prefix+"max-idle-connections-per-host", 100, "Maximum number of idle (keep-alive) connections to keep per-host. If 0, a built-in default value is used.") f.IntVar(&cfg.MaxConnsPerHost, prefix+"max-connections-per-host", 0, "Maximum number of connections per host. 
0 means no limit.") + f.StringVar(&cfg.CAFile, prefix+"ca-file", "", "Path to the trusted CA file that signed the SSL certificate of the object storage endpoint.") } diff --git a/pkg/storage/chunk/client/gcp/gcs_thanos_object_client.go b/pkg/storage/chunk/client/gcp/gcs_thanos_object_client.go index da72b865dd6fc..a16233094d593 100644 --- a/pkg/storage/chunk/client/gcp/gcs_thanos_object_client.go +++ b/pkg/storage/chunk/client/gcp/gcs_thanos_object_client.go @@ -20,24 +20,38 @@ import ( ) type GCSThanosObjectClient struct { - client objstore.Bucket + client objstore.Bucket + hedgedClient objstore.Bucket } func NewGCSThanosObjectClient(ctx context.Context, cfg bucket.Config, component string, logger log.Logger, hedgingCfg hedging.Config, reg prometheus.Registerer) (*GCSThanosObjectClient, error) { - //TODO(JoaoBraveCoding) Add Hedging client once we are able to configure HTTP on GCS provider - return newGCSThanosObjectClient(ctx, cfg, component, logger, reg) -} - -func newGCSThanosObjectClient(ctx context.Context, cfg bucket.Config, component string, logger log.Logger, reg prometheus.Registerer) (*GCSThanosObjectClient, error) { - bucket, err := bucket.NewClient(ctx, cfg, component, logger, reg) + client, err := newGCSThanosObjectClient(ctx, cfg, component, logger, false, hedgingCfg, prometheus.WrapRegistererWith(prometheus.Labels{"hedging": "false"}, reg)) + if err != nil { + return nil, err + } + hedgedClient, err := newGCSThanosObjectClient(ctx, cfg, component, logger, true, hedgingCfg, prometheus.WrapRegistererWith(prometheus.Labels{"hedging": "true"}, reg)) if err != nil { return nil, err } return &GCSThanosObjectClient{ - client: bucket, + client: client, + hedgedClient: hedgedClient, }, nil } +func newGCSThanosObjectClient(ctx context.Context, cfg bucket.Config, component string, logger log.Logger, hedging bool, hedgingCfg hedging.Config, reg prometheus.Registerer) (objstore.Bucket, error) { + if hedging { + hedgedTrasport, err := hedgingCfg.RoundTripperWithRegisterer(nil, reg) + if err != nil { + return nil, err + } + + cfg.GCS.HTTP.Transport = hedgedTrasport + } + + return bucket.NewClient(ctx, cfg, component, logger, reg) +} + func (s *GCSThanosObjectClient) Stop() {} // ObjectExists checks if a given objectKey exists in the GCS bucket diff --git a/pkg/storage/chunk/client/gcp/gcs_thanos_object_client_test.go b/pkg/storage/chunk/client/gcp/gcs_thanos_object_client_test.go index 0fcb35fe379ac..0f241373fa189 100644 --- a/pkg/storage/chunk/client/gcp/gcs_thanos_object_client_test.go +++ b/pkg/storage/chunk/client/gcp/gcs_thanos_object_client_test.go @@ -6,9 +6,10 @@ import ( "sort" "testing" + "github.com/stretchr/testify/require" + "github.com/grafana/loki/pkg/storage/bucket/filesystem" "github.com/grafana/loki/pkg/storage/chunk/client" - "github.com/stretchr/testify/require" ) func TestGCSThanosObjStore_List(t *testing.T) { diff --git a/vendor/cloud.google.com/go/.release-please-manifest-individual.json b/vendor/cloud.google.com/go/.release-please-manifest-individual.json index 2c8e1e1e07577..38a67bae445bd 100644 --- a/vendor/cloud.google.com/go/.release-please-manifest-individual.json +++ b/vendor/cloud.google.com/go/.release-please-manifest-individual.json @@ -1,13 +1,15 @@ { - "bigquery": "1.53.0", - "bigtable": "1.19.0", - "datastore": "1.13.0", + "auth": "0.1.0", + "auth/oauth2adapt": "0.1.0", + "bigquery": "1.57.0", + "bigtable": "1.20.0", + "datastore": "1.15.0", "errorreporting": "0.3.0", - "firestore": "1.11.0", - "logging": "1.7.0", - "profiler": "0.3.1", + "firestore": 
"1.14.0", + "logging": "1.8.1", + "profiler": "0.4.0", "pubsub": "1.33.0", "pubsublite": "1.8.1", - "spanner": "1.47.0", - "storage": "1.31.0" + "spanner": "1.51.0", + "storage": "1.34.0" } diff --git a/vendor/cloud.google.com/go/.release-please-manifest-submodules.json b/vendor/cloud.google.com/go/.release-please-manifest-submodules.json index c02b65f486807..12fed1bc181e0 100644 --- a/vendor/cloud.google.com/go/.release-please-manifest-submodules.json +++ b/vendor/cloud.google.com/go/.release-please-manifest-submodules.json @@ -1,128 +1,131 @@ { - "accessapproval": "1.7.1", - "accesscontextmanager": "1.8.1", - "advisorynotifications": "1.0.0", - "ai": "0.1.1", - "aiplatform": "1.48.0", - "alloydb": "1.4.0", - "analytics": "0.21.3", - "apigateway": "1.6.1", - "apigeeconnect": "1.6.1", - "apigeeregistry": "0.7.1", - "apikeys": "1.1.1", - "appengine": "1.8.1", - "area120": "0.8.1", - "artifactregistry": "1.14.1", - "asset": "1.14.1", - "assuredworkloads": "1.11.1", - "automl": "1.13.1", - "baremetalsolution": "1.1.1", - "batch": "1.3.1", - "beyondcorp": "1.0.0", - "billing": "1.16.0", - "binaryauthorization": "1.6.1", - "certificatemanager": "1.7.1", - "channel": "1.16.0", - "cloudbuild": "1.13.0", - "clouddms": "1.6.1", - "cloudtasks": "1.12.1", - "commerce": "0.1.0", - "compute": "1.23.0", + "accessapproval": "1.7.3", + "accesscontextmanager": "1.8.3", + "advisorynotifications": "1.2.2", + "ai": "0.1.3", + "aiplatform": "1.51.2", + "alloydb": "1.6.2", + "analytics": "0.21.5", + "apigateway": "1.6.3", + "apigeeconnect": "1.6.3", + "apigeeregistry": "0.8.1", + "apikeys": "1.1.3", + "appengine": "1.8.3", + "area120": "0.8.3", + "artifactregistry": "1.14.5", + "asset": "1.15.2", + "assuredworkloads": "1.11.3", + "automl": "1.13.3", + "baremetalsolution": "1.2.2", + "batch": "1.6.2", + "beyondcorp": "1.0.2", + "billing": "1.17.3", + "binaryauthorization": "1.7.2", + "certificatemanager": "1.7.3", + "channel": "1.17.2", + "cloudbuild": "1.14.2", + "clouddms": "1.7.2", + "cloudtasks": "1.12.3", + "commerce": "0.1.2", + "compute": "1.23.2", "compute/metadata": "0.2.3", - "confidentialcomputing": "1.1.0", - "contactcenterinsights": "1.10.0", - "container": "1.24.0", - "containeranalysis": "0.10.1", - "datacatalog": "1.16.0", - "dataflow": "0.9.1", - "dataform": "0.8.1", - "datafusion": "1.7.1", - "datalabeling": "0.8.1", - "dataplex": "1.9.0", - "dataproc": "2.0.1", - "dataqna": "0.8.1", - "datastream": "1.10.0", - "deploy": "1.13.0", - "dialogflow": "1.40.0", - "discoveryengine": "1.1.0", - "dlp": "1.10.1", - "documentai": "1.22.0", - "domains": "0.9.1", - "edgecontainer": "1.1.1", - "essentialcontacts": "1.6.2", - "eventarc": "1.13.0", - "filestore": "1.7.1", - "functions": "1.15.1", - "gkebackup": "1.3.0", - "gkeconnect": "0.8.1", - "gkehub": "0.14.1", - "gkemulticloud": "1.0.0", - "grafeas": "0.3.1", - "gsuiteaddons": "1.6.1", - "iam": "1.1.1", - "iap": "1.8.1", - "ids": "1.4.1", - "iot": "1.7.1", - "kms": "1.15.0", - "language": "1.10.1", - "lifesciences": "0.9.1", - "longrunning": "0.5.1", - "managedidentities": "1.6.1", - "maps": "1.4.0", - "mediatranslation": "0.8.1", - "memcache": "1.10.1", - "metastore": "1.12.0", - "migrationcenter": "0.1.0", - "monitoring": "1.15.1", - "netapp": "0.1.0", - "networkconnectivity": "1.12.1", - "networkmanagement": "1.8.0", - "networksecurity": "0.9.1", - "notebooks": "1.9.1", - "optimization": "1.4.1", - "orchestration": "1.8.1", - "orgpolicy": "1.11.1", - "osconfig": "1.12.1", - "oslogin": "1.10.1", - "phishingprotection": "0.8.1", - "policysimulator": 
"0.1.0", - "policytroubleshooter": "1.8.0", - "privatecatalog": "0.9.1", - "rapidmigrationassessment": "1.0.0", - "recaptchaenterprise": "2.7.2", - "recommendationengine": "0.8.1", - "recommender": "1.10.1", - "redis": "1.13.1", - "resourcemanager": "1.9.1", - "resourcesettings": "1.6.1", - "retail": "1.14.1", - "run": "1.2.0", - "scheduler": "1.10.1", - "secretmanager": "1.11.1", - "security": "1.15.1", - "securitycenter": "1.23.0", - "servicecontrol": "1.12.1", - "servicedirectory": "1.11.0", - "servicemanagement": "1.9.2", - "serviceusage": "1.7.1", - "shell": "1.7.1", - "speech": "1.19.0", - "storageinsights": "1.0.0", - "storagetransfer": "1.10.0", - "support": "1.0.0", - "talent": "1.6.2", - "texttospeech": "1.7.1", - "tpu": "1.6.1", - "trace": "1.10.1", - "translate": "1.8.2", - "video": "1.19.0", - "videointelligence": "1.11.1", - "vision": "2.7.2", - "vmmigration": "1.7.1", - "vmwareengine": "1.0.0", - "vpcaccess": "1.7.1", - "webrisk": "1.9.1", - "websecurityscanner": "1.6.1", - "workflows": "1.11.1", - "workstations": "0.4.1" + "confidentialcomputing": "1.3.2", + "config": "0.1.3", + "contactcenterinsights": "1.11.2", + "container": "1.27.0", + "containeranalysis": "0.11.2", + "datacatalog": "1.18.2", + "dataflow": "0.9.3", + "dataform": "0.9.0", + "datafusion": "1.7.3", + "datalabeling": "0.8.3", + "dataplex": "1.11.0", + "dataproc": "2.2.2", + "dataqna": "0.8.3", + "datastream": "1.10.2", + "deploy": "1.14.1", + "dialogflow": "1.44.2", + "discoveryengine": "1.2.2", + "dlp": "1.11.0", + "documentai": "1.23.4", + "domains": "0.9.3", + "edgecontainer": "1.1.3", + "essentialcontacts": "1.6.4", + "eventarc": "1.13.2", + "filestore": "1.7.3", + "functions": "1.15.3", + "gkebackup": "1.3.3", + "gkeconnect": "0.8.3", + "gkehub": "0.14.3", + "gkemulticloud": "1.0.2", + "grafeas": "0.3.3", + "gsuiteaddons": "1.6.3", + "iam": "1.1.4", + "iap": "1.9.2", + "ids": "1.4.3", + "iot": "1.7.3", + "kms": "1.15.4", + "language": "1.12.1", + "lifesciences": "0.9.3", + "longrunning": "0.5.3", + "managedidentities": "1.6.3", + "maps": "1.6.0", + "mediatranslation": "0.8.3", + "memcache": "1.10.3", + "metastore": "1.13.2", + "migrationcenter": "0.2.2", + "monitoring": "1.16.2", + "netapp": "0.2.2", + "networkconnectivity": "1.14.2", + "networkmanagement": "1.9.2", + "networksecurity": "0.9.3", + "notebooks": "1.11.1", + "optimization": "1.6.1", + "orchestration": "1.8.3", + "orgpolicy": "1.11.3", + "osconfig": "1.12.3", + "oslogin": "1.12.1", + "phishingprotection": "0.8.3", + "policysimulator": "0.2.1", + "policytroubleshooter": "1.10.1", + "privatecatalog": "0.9.3", + "rapidmigrationassessment": "1.0.3", + "recaptchaenterprise": "2.8.2", + "recommendationengine": "0.8.3", + "recommender": "1.11.2", + "redis": "1.14.0", + "resourcemanager": "1.9.3", + "resourcesettings": "1.6.3", + "retail": "1.14.3", + "run": "1.3.2", + "scheduler": "1.10.3", + "secretmanager": "1.11.3", + "securesourcemanager": "0.1.1", + "security": "1.15.3", + "securitycenter": "1.24.1", + "servicecontrol": "1.12.3", + "servicedirectory": "1.11.2", + "servicemanagement": "1.9.4", + "serviceusage": "1.8.2", + "shell": "1.7.3", + "shopping": "0.2.1", + "speech": "1.20.0", + "storageinsights": "1.0.3", + "storagetransfer": "1.10.2", + "support": "1.0.2", + "talent": "1.6.4", + "texttospeech": "1.7.3", + "tpu": "1.6.3", + "trace": "1.10.3", + "translate": "1.9.2", + "video": "1.20.2", + "videointelligence": "1.11.3", + "vision": "2.7.4", + "vmmigration": "1.7.3", + "vmwareengine": "1.0.2", + "vpcaccess": "1.7.3", + "webrisk": 
"1.9.3", + "websecurityscanner": "1.6.3", + "workflows": "1.12.2", + "workstations": "0.5.2" } diff --git a/vendor/cloud.google.com/go/.release-please-manifest.json b/vendor/cloud.google.com/go/.release-please-manifest.json index 7c95dc1b009fd..f1afd6591f971 100644 --- a/vendor/cloud.google.com/go/.release-please-manifest.json +++ b/vendor/cloud.google.com/go/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "0.110.7" + ".": "0.110.10" } diff --git a/vendor/cloud.google.com/go/CHANGES.md b/vendor/cloud.google.com/go/CHANGES.md index f500b8ee9bf37..a418e2ee1cfb7 100644 --- a/vendor/cloud.google.com/go/CHANGES.md +++ b/vendor/cloud.google.com/go/CHANGES.md @@ -1,5 +1,36 @@ # Changes +## [0.110.10](https://github.com/googleapis/google-cloud-go/compare/v0.110.9...v0.110.10) (2023-10-31) + + +### Bug Fixes + +* **all:** Update grpc-go to v1.56.3 ([#8916](https://github.com/googleapis/google-cloud-go/issues/8916)) ([343cea8](https://github.com/googleapis/google-cloud-go/commit/343cea8c43b1e31ae21ad50ad31d3b0b60143f8c)) +* **all:** Update grpc-go to v1.59.0 ([#8922](https://github.com/googleapis/google-cloud-go/issues/8922)) ([81a97b0](https://github.com/googleapis/google-cloud-go/commit/81a97b06cb28b25432e4ece595c55a9857e960b7)) +* **internal/godocfx:** Fix links to other packages in summary ([#8756](https://github.com/googleapis/google-cloud-go/issues/8756)) ([6220a9a](https://github.com/googleapis/google-cloud-go/commit/6220a9afeb89df3080e9e663e97648939fd4e15f)) + +## [0.110.9](https://github.com/googleapis/google-cloud-go/compare/v0.110.8...v0.110.9) (2023-10-19) + + +### Bug Fixes + +* **all:** Update golang.org/x/net to v0.17.0 ([#8705](https://github.com/googleapis/google-cloud-go/issues/8705)) ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d)) +* **internal/aliasgen:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d)) +* **internal/examples/fake:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d)) +* **internal/gapicgen:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d)) +* **internal/generated/snippets:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d)) +* **internal/godocfx:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d)) +* **internal/postprocessor:** Add ability to override release level ([#8643](https://github.com/googleapis/google-cloud-go/issues/8643)) ([26c608a](https://github.com/googleapis/google-cloud-go/commit/26c608a8204d740767dfebf6aa473cdf1873e5f0)) +* **internal/postprocessor:** Add missing assignment ([#8646](https://github.com/googleapis/google-cloud-go/issues/8646)) ([d8c5746](https://github.com/googleapis/google-cloud-go/commit/d8c5746e6dde1bd34c01a9886804f861c88c0cb7)) +* **internal/postprocessor:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d)) + +## [0.110.8](https://github.com/googleapis/google-cloud-go/compare/v0.110.7...v0.110.8) (2023-09-11) + + +### Documentation + +* **postprocessor:** Nudge users towards stable clients 
([#8513](https://github.com/googleapis/google-cloud-go/issues/8513)) ([05a1484](https://github.com/googleapis/google-cloud-go/commit/05a1484b0752aaa3d6a164d37686d6de070cc78d)) + ## [0.110.7](https://github.com/googleapis/google-cloud-go/compare/v0.110.6...v0.110.7) (2023-07-31) diff --git a/vendor/cloud.google.com/go/CONTRIBUTING.md b/vendor/cloud.google.com/go/CONTRIBUTING.md index 3a391131aa841..18f175dac99be 100644 --- a/vendor/cloud.google.com/go/CONTRIBUTING.md +++ b/vendor/cloud.google.com/go/CONTRIBUTING.md @@ -128,6 +128,7 @@ project's service account. - `GCLOUD_TESTS_GOLANG_DATASTORE_DATABASES`: Comma separated list of developer's Datastore databases. If not provided, default database i.e. empty string is used. - `GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID`: Developers Console project's ID (e.g. doorway-cliff-677) for the Firestore project. +- `GCLOUD_TESTS_GOLANG_FIRESTORE_DATABASES` : Comma separated list of developer's Firestore databases. If not provided, default database is used. - `GCLOUD_TESTS_GOLANG_FIRESTORE_KEY`: The path to the JSON key file of the Firestore project's service account. - `GCLOUD_TESTS_API_KEY`: API key for using the Translate API created above. @@ -228,6 +229,9 @@ export GCLOUD_TESTS_GOLANG_DATASTORE_DATABASES=your-database-1,your-database-2 # Developers Console project's ID (e.g. doorway-cliff-677) for the Firestore project. export GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID=your-firestore-project +# Comma separated list of developer's Firestore databases. If not provided, default database is used. +export GCLOUD_TESTS_GOLANG_FIRESTORE_DATABASES=your-database-1,your-database-2 + # The path to the JSON key file of the Firestore project's service account. export GCLOUD_TESTS_GOLANG_FIRESTORE_KEY=~/directory/your-firestore-project-abcd1234.json diff --git a/vendor/cloud.google.com/go/README.md b/vendor/cloud.google.com/go/README.md index 6ce222dccac6c..f6c8159b59e55 100644 --- a/vendor/cloud.google.com/go/README.md +++ b/vendor/cloud.google.com/go/README.md @@ -10,17 +10,13 @@ import "cloud.google.com/go" To install the packages on your system, *do not clone the repo*. Instead: -1. Change to your project directory: - - ```bash - cd /my/cloud/project - ``` +1. Change to your project directory: `cd /my/cloud/project` 1. Get the package you want to use. Some products have their own module, so it's best to `go get` the package(s) you want to use: - ``` - $ go get cloud.google.com/go/firestore # Replace with the package you want to use. - ``` +```bash +go get cloud.google.com/go/firestore # Replace with the package you want to use. +``` **NOTE:** Some of these packages are under development, and may occasionally make backwards-incompatible changes. @@ -35,10 +31,9 @@ For an updated list of all of our released APIs please see our Our libraries are compatible with at least the three most recent, major Go releases. They are currently compatible with: +- Go 1.21 - Go 1.20 - Go 1.19 -- Go 1.18 -- Go 1.17 ## Authorization @@ -46,7 +41,6 @@ By default, each API will use [Google Application Default Credentials](https://d for authorization credentials used in calling the API endpoints. This will allow your application to run in many environments without requiring explicit configuration. -[snip]:# (auth) ```go client, err := storage.NewClient(ctx) ``` @@ -57,7 +51,6 @@ pass [`option.WithCredentialsFile`](https://pkg.go.dev/google.golang.org/api/option#WithCredentialsFile) to the `NewClient` function of the desired package. 
For example: -[snip]:# (auth-JSON) ```go client, err := storage.NewClient(ctx, option.WithCredentialsFile("path/to/keyfile.json")) ``` @@ -67,7 +60,7 @@ You can exert more control over authorization by using the create an `oauth2.TokenSource`. Then pass [`option.WithTokenSource`](https://pkg.go.dev/google.golang.org/api/option#WithTokenSource) to the `NewClient` function: -[snip]:# (auth-ts) + ```go tokenSource := ... client, err := storage.NewClient(ctx, option.WithTokenSource(tokenSource)) @@ -84,51 +77,6 @@ By participating in this project you agree to abide by its terms. See [Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md#contributor-code-of-conduct) for more information. -[cloud-asset]: https://cloud.google.com/security-command-center/docs/how-to-asset-inventory -[cloud-automl]: https://cloud.google.com/automl -[cloud-build]: https://cloud.google.com/cloud-build/ -[cloud-bigquery]: https://cloud.google.com/bigquery/ -[cloud-bigtable]: https://cloud.google.com/bigtable/ -[cloud-compute]: https://cloud.google.com/compute -[cloud-container]: https://cloud.google.com/containers/ -[cloud-containeranalysis]: https://cloud.google.com/container-registry/docs/container-analysis -[cloud-dataproc]: https://cloud.google.com/dataproc/ -[cloud-datastore]: https://cloud.google.com/datastore/ -[cloud-dialogflow]: https://cloud.google.com/dialogflow-enterprise/ -[cloud-debugger]: https://cloud.google.com/debugger/ -[cloud-dlp]: https://cloud.google.com/dlp/ -[cloud-errors]: https://cloud.google.com/error-reporting/ -[cloud-firestore]: https://cloud.google.com/firestore/ -[cloud-iam]: https://cloud.google.com/iam/ -[cloud-iot]: https://cloud.google.com/iot-core/ -[cloud-irm]: https://cloud.google.com/incident-response/docs/concepts -[cloud-kms]: https://cloud.google.com/kms/ -[cloud-pubsub]: https://cloud.google.com/pubsub/ -[cloud-pubsublite]: https://cloud.google.com/pubsub/lite -[cloud-storage]: https://cloud.google.com/storage/ -[cloud-language]: https://cloud.google.com/natural-language -[cloud-logging]: https://cloud.google.com/logging/ -[cloud-natural-language]: https://cloud.google.com/natural-language/ -[cloud-memorystore]: https://cloud.google.com/memorystore/ -[cloud-monitoring]: https://cloud.google.com/monitoring/ -[cloud-oslogin]: https://cloud.google.com/compute/docs/oslogin/rest -[cloud-phishingprotection]: https://cloud.google.com/phishing-protection/ -[cloud-securitycenter]: https://cloud.google.com/security-command-center/ -[cloud-scheduler]: https://cloud.google.com/scheduler -[cloud-spanner]: https://cloud.google.com/spanner/ -[cloud-speech]: https://cloud.google.com/speech -[cloud-talent]: https://cloud.google.com/solutions/talent-solution/ -[cloud-tasks]: https://cloud.google.com/tasks/ -[cloud-texttospeech]: https://cloud.google.com/texttospeech/ -[cloud-talent]: https://cloud.google.com/solutions/talent-solution/ -[cloud-trace]: https://cloud.google.com/trace/ -[cloud-translate]: https://cloud.google.com/translate -[cloud-recaptcha]: https://cloud.google.com/recaptcha-enterprise/ -[cloud-recommender]: https://cloud.google.com/recommendations/ -[cloud-video]: https://cloud.google.com/video-intelligence/ -[cloud-vision]: https://cloud.google.com/vision -[cloud-webrisk]: https://cloud.google.com/web-risk/ - ## Links - [Go on Google Cloud](https://cloud.google.com/go/home) diff --git a/vendor/cloud.google.com/go/compute/internal/version.go b/vendor/cloud.google.com/go/compute/internal/version.go index 
6395537003224..540ad16ac491f 100644 --- a/vendor/cloud.google.com/go/compute/internal/version.go +++ b/vendor/cloud.google.com/go/compute/internal/version.go @@ -15,4 +15,4 @@ package internal // Version is the current tagged release of the library. -const Version = "1.23.0" +const Version = "1.23.3" diff --git a/vendor/cloud.google.com/go/iam/CHANGES.md b/vendor/cloud.google.com/go/iam/CHANGES.md index 0cef5cebf13c9..c4cacb03f880d 100644 --- a/vendor/cloud.google.com/go/iam/CHANGES.md +++ b/vendor/cloud.google.com/go/iam/CHANGES.md @@ -1,6 +1,34 @@ # Changes +## [1.1.5](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.4...iam/v1.1.5) (2023-11-01) + + +### Bug Fixes + +* **iam:** Bump google.golang.org/api to v0.149.0 ([8d2ab9f](https://github.com/googleapis/google-cloud-go/commit/8d2ab9f320a86c1c0fab90513fc05861561d0880)) + +## [1.1.4](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.3...iam/v1.1.4) (2023-10-26) + + +### Bug Fixes + +* **iam:** Update grpc-go to v1.59.0 ([81a97b0](https://github.com/googleapis/google-cloud-go/commit/81a97b06cb28b25432e4ece595c55a9857e960b7)) + +## [1.1.3](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.2...iam/v1.1.3) (2023-10-12) + + +### Bug Fixes + +* **iam:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d)) + +## [1.1.2](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.1...iam/v1.1.2) (2023-08-08) + + +### Documentation + +* **iam:** Minor formatting ([b4349cc](https://github.com/googleapis/google-cloud-go/commit/b4349cc507870ff8629bbc07de578b63bb889626)) + ## [1.1.1](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.0...iam/v1.1.1) (2023-06-20) diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go index 3d3e763238310..85346a891df1e 100644 --- a/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go +++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.31.0 // protoc v4.23.2 // source: google/iam/v1/iam_policy.proto diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go index adc445b0718bc..68f8d761f7f73 100644 --- a/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go +++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.31.0 // protoc v4.23.2 // source: google/iam/v1/options.proto diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go index 69c720e733749..eefd1d0e546b7 100644 --- a/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go +++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go @@ -1,4 +1,4 @@ -// Copyright 2022 Google LLC +// Copyright 2023 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.31.0 // protoc v4.23.2 // source: google/iam/v1/policy.proto @@ -219,6 +219,8 @@ func (AuditConfigDelta_Action) EnumDescriptor() ([]byte, []int) { // // **JSON example:** // +// ``` +// // { // "bindings": [ // { @@ -247,8 +249,12 @@ func (AuditConfigDelta_Action) EnumDescriptor() ([]byte, []int) { // "version": 3 // } // +// ``` +// // **YAML example:** // +// ``` +// // bindings: // - members: // - user:mike@example.com @@ -266,6 +272,8 @@ func (AuditConfigDelta_Action) EnumDescriptor() ([]byte, []int) { // etag: BwWWja0YfJA= // version: 3 // +// ``` +// // For a description of IAM and its features, see the // [IAM documentation](https://cloud.google.com/iam/docs/). type Policy struct { @@ -396,7 +404,7 @@ type Binding struct { // Role that is assigned to the list of `members`, or principals. // For example, `roles/viewer`, `roles/editor`, or `roles/owner`. Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` - // Specifies the principals requesting access for a Cloud Platform resource. + // Specifies the principals requesting access for a Google Cloud resource. // `members` can have the following values: // // * `allUsers`: A special identifier that represents anyone who is @@ -558,8 +566,8 @@ func (x *Binding) GetCondition() *expr.Expr { // } // // For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ -// logging. It also exempts jose@example.com from DATA_READ logging, and -// aliya@example.com from DATA_WRITE logging. +// logging. It also exempts `jose@example.com` from DATA_READ logging, and +// `aliya@example.com` from DATA_WRITE logging. type AuditConfig struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -770,7 +778,7 @@ type BindingDelta struct { // For example, `roles/viewer`, `roles/editor`, or `roles/owner`. // Required Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"` - // A single identity requesting access for a Cloud Platform resource. + // A single identity requesting access for a Google Cloud resource. // Follows the same format of Binding.members. 
// Required Member string `protobuf:"bytes,3,opt,name=member,proto3" json:"member,omitempty"` diff --git a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json index 95b54b1378d9d..31d1720471cad 100644 --- a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json +++ b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json @@ -89,6 +89,16 @@ "release_level": "preview", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/alloydb/connectors/apiv1": { + "api_shortname": "connectors", + "distribution_name": "cloud.google.com/go/alloydb/connectors/apiv1", + "description": "AlloyDB connectors", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/alloydb/latest/connectors/apiv1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/alloydb/connectors/apiv1alpha": { "api_shortname": "connectors", "distribution_name": "cloud.google.com/go/alloydb/connectors/apiv1alpha", @@ -99,6 +109,16 @@ "release_level": "preview", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/alloydb/connectors/apiv1beta": { + "api_shortname": "connectors", + "distribution_name": "cloud.google.com/go/alloydb/connectors/apiv1beta", + "description": "AlloyDB connectors", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/alloydb/latest/connectors/apiv1beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/analytics/admin/apiv1alpha": { "api_shortname": "analyticsadmin", "distribution_name": "cloud.google.com/go/analytics/admin/apiv1alpha", @@ -136,7 +156,7 @@ "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apigeeregistry/latest/apiv1", - "release_level": "preview", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/apikeys/apiv2": { @@ -349,6 +369,26 @@ "release_level": "stable", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/bigquery/biglake/apiv1": { + "api_shortname": "biglake", + "distribution_name": "cloud.google.com/go/bigquery/biglake/apiv1", + "description": "BigLake API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/biglake/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/bigquery/biglake/apiv1alpha1": { + "api_shortname": "biglake", + "distribution_name": "cloud.google.com/go/bigquery/biglake/apiv1alpha1", + "description": "BigLake API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/biglake/apiv1alpha1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/bigquery/connection/apiv1": { "api_shortname": "bigqueryconnection", "distribution_name": "cloud.google.com/go/bigquery/connection/apiv1", @@ -659,6 +699,16 @@ "release_level": "preview", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/config/apiv1": { + "api_shortname": "config", + "distribution_name": "cloud.google.com/go/config/apiv1", + "description": "Infrastructure Manager API", + "language": "go", + "client_library_type": "generated", + "client_documentation": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go/config/latest/apiv1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/contactcenterinsights/apiv1": { "api_shortname": "contactcenterinsights", "distribution_name": "cloud.google.com/go/contactcenterinsights/apiv1", @@ -685,7 +735,7 @@ "description": "Container Analysis API", "language": "go", "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/containeranalysis/apiv1beta1", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/containeranalysis/latest/apiv1beta1", "release_level": "preview", "library_type": "GAPIC_AUTO" }, @@ -922,7 +972,7 @@ "cloud.google.com/go/dlp/apiv2": { "api_shortname": "dlp", "distribution_name": "cloud.google.com/go/dlp/apiv2", - "description": "Cloud Data Loss Prevention (DLP) API", + "description": "Cloud Data Loss Prevention (DLP)", "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dlp/latest/apiv2", @@ -1249,6 +1299,16 @@ "release_level": "preview", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/language/apiv2": { + "api_shortname": "language", + "distribution_name": "cloud.google.com/go/language/apiv2", + "description": "Cloud Natural Language API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/language/latest/apiv2", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/lifesciences/apiv2beta": { "api_shortname": "lifesciences", "distribution_name": "cloud.google.com/go/lifesciences/apiv2beta", @@ -1309,6 +1369,26 @@ "release_level": "stable", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/maps/fleetengine/apiv1": { + "api_shortname": "fleetengine", + "distribution_name": "cloud.google.com/go/maps/fleetengine/apiv1", + "description": "Local Rides and Deliveries API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/fleetengine/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/maps/fleetengine/delivery/apiv1": { + "api_shortname": "fleetengine", + "distribution_name": "cloud.google.com/go/maps/fleetengine/delivery/apiv1", + "description": "Last Mile Fleet Solution Delivery API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/fleetengine/delivery/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/maps/mapsplatformdatasets/apiv1alpha": { "api_shortname": "mapsplatformdatasets", "distribution_name": "cloud.google.com/go/maps/mapsplatformdatasets/apiv1alpha", @@ -1509,6 +1589,16 @@ "release_level": "preview", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/notebooks/apiv2": { + "api_shortname": "notebooks", + "distribution_name": "cloud.google.com/go/notebooks/apiv2", + "description": "Notebooks API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/notebooks/latest/apiv2", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/optimization/apiv1": { "api_shortname": 
"cloudoptimization", "distribution_name": "cloud.google.com/go/optimization/apiv1", @@ -1626,7 +1716,7 @@ "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/policysimulator/latest/apiv1", - "release_level": "preview", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/policytroubleshooter/apiv1": { @@ -1639,6 +1729,16 @@ "release_level": "stable", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/policytroubleshooter/iam/apiv3": { + "api_shortname": "policytroubleshooter", + "distribution_name": "cloud.google.com/go/policytroubleshooter/iam/apiv3", + "description": "Policy Troubleshooter API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/policytroubleshooter/latest/iam/apiv3", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/privatecatalog/apiv1beta1": { "api_shortname": "cloudprivatecatalog", "distribution_name": "cloud.google.com/go/privatecatalog/apiv1beta1", @@ -1779,6 +1879,16 @@ "release_level": "preview", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/redis/cluster/apiv1": { + "api_shortname": "redis", + "distribution_name": "cloud.google.com/go/redis/cluster/apiv1", + "description": "Google Cloud Memorystore for Redis API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/redis/latest/cluster/apiv1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/resourcemanager/apiv2": { "api_shortname": "cloudresourcemanager", "distribution_name": "cloud.google.com/go/resourcemanager/apiv2", @@ -1889,6 +1999,16 @@ "release_level": "stable", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/securesourcemanager/apiv1": { + "api_shortname": "securesourcemanager", + "distribution_name": "cloud.google.com/go/securesourcemanager/apiv1", + "description": "Secure Source Manager API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/securesourcemanager/latest/apiv1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/security/privateca/apiv1": { "api_shortname": "privateca", "distribution_name": "cloud.google.com/go/security/privateca/apiv1", @@ -2009,6 +2129,16 @@ "release_level": "stable", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/shopping/merchant/inventories/apiv1beta": { + "api_shortname": "merchantapi", + "distribution_name": "cloud.google.com/go/shopping/merchant/inventories/apiv1beta", + "description": "Merchant API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/shopping/latest/merchant/inventories/apiv1beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/spanner": { "api_shortname": "spanner", "distribution_name": "cloud.google.com/go/spanner", @@ -2215,8 +2345,8 @@ "description": "Video Stitcher API", "language": "go", "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/video/stitcher/apiv1", - "release_level": "preview", + "client_documentation": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go/video/latest/stitcher/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/video/transcoder/apiv1": { diff --git a/vendor/cloud.google.com/go/internal/pubsub/message.go b/vendor/cloud.google.com/go/internal/pubsub/message.go index 7d7092b191000..5dc25666a0727 100644 --- a/vendor/cloud.google.com/go/internal/pubsub/message.go +++ b/vendor/cloud.google.com/go/internal/pubsub/message.go @@ -186,7 +186,9 @@ func (m *Message) AckWithResult() *AckResult { if m.ackh != nil { return m.ackh.OnAckWithResult() } - return nil + // When the message was constructed directly rather passed in the callback in `sub.Receive`, + // ready the message with success so calling `AckResult.Get` doesn't panic. + return newSuccessAckResult() } // NackWithResult declines to acknowledge the message which indicates that @@ -206,7 +208,9 @@ func (m *Message) NackWithResult() *AckResult { if m.ackh != nil { return m.ackh.OnNackWithResult() } - return nil + // When the message was constructed directly rather passed in the callback in `sub.Receive`, + // ready the message with success so calling `AckResult.Get` doesn't panic. + return newSuccessAckResult() } // NewMessage creates a message with an AckHandler implementation, which should @@ -219,3 +223,9 @@ func NewMessage(ackh AckHandler) *Message { func MessageAckHandler(m *Message) AckHandler { return m.ackh } + +func newSuccessAckResult() *AckResult { + ar := NewAckResult() + SetAckResult(ar, AcknowledgeStatusSuccess, nil) + return ar +} diff --git a/vendor/cloud.google.com/go/longrunning/CHANGES.md b/vendor/cloud.google.com/go/longrunning/CHANGES.md index 35dbbd01626b6..f0bb878383db5 100644 --- a/vendor/cloud.google.com/go/longrunning/CHANGES.md +++ b/vendor/cloud.google.com/go/longrunning/CHANGES.md @@ -1,5 +1,26 @@ # Changes +## [0.5.4](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.5.3...longrunning/v0.5.4) (2023-11-01) + + +### Bug Fixes + +* **longrunning:** Bump google.golang.org/api to v0.149.0 ([8d2ab9f](https://github.com/googleapis/google-cloud-go/commit/8d2ab9f320a86c1c0fab90513fc05861561d0880)) + +## [0.5.3](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.5.2...longrunning/v0.5.3) (2023-10-26) + + +### Bug Fixes + +* **longrunning:** Update grpc-go to v1.59.0 ([81a97b0](https://github.com/googleapis/google-cloud-go/commit/81a97b06cb28b25432e4ece595c55a9857e960b7)) + +## [0.5.2](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.5.1...longrunning/v0.5.2) (2023-10-12) + + +### Bug Fixes + +* **longrunning:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d)) + ## [0.5.1](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.5.0...longrunning/v0.5.1) (2023-06-20) diff --git a/vendor/cloud.google.com/go/longrunning/autogen/doc.go b/vendor/cloud.google.com/go/longrunning/autogen/doc.go index 4f5c84bd39979..44fa0b14f3efc 100644 --- a/vendor/cloud.google.com/go/longrunning/autogen/doc.go +++ b/vendor/cloud.google.com/go/longrunning/autogen/doc.go @@ -19,8 +19,15 @@ // // # General documentation // -// For information about setting deadlines, reusing contexts, and more -// please visit https://pkg.go.dev/cloud.google.com/go. +// For information that is relevant for all client libraries please reference +// https://pkg.go.dev/cloud.google.com/go#pkg-overview. 
Some information on this +// page includes: +// +// - [Authentication and Authorization] +// - [Timeouts and Cancellation] +// - [Testing against Client Libraries] +// - [Debugging Client Libraries] +// - [Inspecting errors] // // # Example usage // @@ -75,11 +82,6 @@ // _ = resp // } // -// # Inspecting errors -// -// To see examples of how to inspect errors returned by this package please reference -// [Inspecting errors](https://pkg.go.dev/cloud.google.com/go#hdr-Inspecting_errors). -// // # Use of Context // // The ctx passed to NewOperationsClient is used for authentication requests and @@ -87,14 +89,18 @@ // Individual methods on the client use the ctx given to them. // // To close the open connection, use the Close() method. +// +// [Authentication and Authorization]: https://pkg.go.dev/cloud.google.com/go#hdr-Authentication_and_Authorization +// [Timeouts and Cancellation]: https://pkg.go.dev/cloud.google.com/go#hdr-Timeouts_and_Cancellation +// [Testing against Client Libraries]: https://pkg.go.dev/cloud.google.com/go#hdr-Testing +// [Debugging Client Libraries]: https://pkg.go.dev/cloud.google.com/go#hdr-Debugging +// [Inspecting errors]: https://pkg.go.dev/cloud.google.com/go#hdr-Inspecting_errors package longrunning // import "cloud.google.com/go/longrunning/autogen" import ( "context" - "net/http" "google.golang.org/api/option" - "google.golang.org/grpc/metadata" ) // For more information on implementing a client constructor hook, see @@ -111,30 +117,9 @@ func getVersionClient() string { return versionClient } -func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { - out, _ := metadata.FromOutgoingContext(ctx) - out = out.Copy() - for _, md := range mds { - for k, v := range md { - out[k] = append(out[k], v...) - } - } - return metadata.NewOutgoingContext(ctx, out) -} - // DefaultAuthScopes reports the default set of authentication scopes to use with this package. func DefaultAuthScopes() []string { return []string{ "", } } - -// buildHeaders extracts metadata from the outgoing context, joins it with any other -// given metadata, and converts them into a http.Header. -func buildHeaders(ctx context.Context, mds ...metadata.MD) http.Header { - if cmd, ok := metadata.FromOutgoingContext(ctx); ok { - mds = append(mds, cmd) - } - md := metadata.Join(mds...) - return http.Header(md) -} diff --git a/vendor/cloud.google.com/go/longrunning/autogen/longrunningpb/operations.pb.go b/vendor/cloud.google.com/go/longrunning/autogen/longrunningpb/operations.pb.go index c524d6fe31697..55bd8235e156b 100644 --- a/vendor/cloud.google.com/go/longrunning/autogen/longrunningpb/operations.pb.go +++ b/vendor/cloud.google.com/go/longrunning/autogen/longrunningpb/operations.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.31.0 // protoc v4.23.2 // source: google/longrunning/operations.proto diff --git a/vendor/cloud.google.com/go/longrunning/autogen/operations_client.go b/vendor/cloud.google.com/go/longrunning/autogen/operations_client.go index a8f426d870052..aac45ca32a251 100644 --- a/vendor/cloud.google.com/go/longrunning/autogen/operations_client.go +++ b/vendor/cloud.google.com/go/longrunning/autogen/operations_client.go @@ -36,7 +36,6 @@ import ( httptransport "google.golang.org/api/transport/http" "google.golang.org/grpc" "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" "google.golang.org/protobuf/encoding/protojson" "google.golang.org/protobuf/proto" ) @@ -293,7 +292,7 @@ type operationsGRPCClient struct { operationsClient longrunningpb.OperationsClient // The x-goog-* metadata to be sent with each request. - xGoogMetadata metadata.MD + xGoogHeaders []string } // NewOperationsClient creates a new operations client based on gRPC. @@ -350,7 +349,7 @@ func (c *operationsGRPCClient) Connection() *grpc.ClientConn { func (c *operationsGRPCClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) - c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) + c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} } // Close closes the connection to the API service. The user should invoke this when @@ -367,8 +366,8 @@ type operationsRESTClient struct { // The http client. httpClient *http.Client - // The x-goog-* metadata to be sent with each request. - xGoogMetadata metadata.MD + // The x-goog-* headers to be sent with each request. + xGoogHeaders []string // Points back to the CallOptions field of the containing OperationsClient CallOptions **OperationsCallOptions @@ -418,7 +417,7 @@ func defaultOperationsRESTClientOptions() []option.ClientOption { func (c *operationsRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) + c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} } // Close closes the connection to the API service. The user should invoke this when @@ -436,9 +435,10 @@ func (c *operationsRESTClient) Connection() *grpc.ClientConn { return nil } func (c *operationsGRPCClient) ListOperations(ctx context.Context, req *longrunningpb.ListOperationsRequest, opts ...gax.CallOption) *OperationIterator { - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} - ctx = insertMetadata(ctx, c.xGoogMetadata, md) + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) opts = append((*c.CallOptions).ListOperations[0:len((*c.CallOptions).ListOperations):len((*c.CallOptions).ListOperations)], opts...) 
it := &OperationIterator{} req = proto.Clone(req).(*longrunningpb.ListOperationsRequest) @@ -481,9 +481,10 @@ func (c *operationsGRPCClient) ListOperations(ctx context.Context, req *longrunn } func (c *operationsGRPCClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) { - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} - ctx = insertMetadata(ctx, c.xGoogMetadata, md) + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) opts = append((*c.CallOptions).GetOperation[0:len((*c.CallOptions).GetOperation):len((*c.CallOptions).GetOperation)], opts...) var resp *longrunningpb.Operation err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -498,9 +499,10 @@ func (c *operationsGRPCClient) GetOperation(ctx context.Context, req *longrunnin } func (c *operationsGRPCClient) DeleteOperation(ctx context.Context, req *longrunningpb.DeleteOperationRequest, opts ...gax.CallOption) error { - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} - ctx = insertMetadata(ctx, c.xGoogMetadata, md) + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) opts = append((*c.CallOptions).DeleteOperation[0:len((*c.CallOptions).DeleteOperation):len((*c.CallOptions).DeleteOperation)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error @@ -511,9 +513,10 @@ func (c *operationsGRPCClient) DeleteOperation(ctx context.Context, req *longrun } func (c *operationsGRPCClient) CancelOperation(ctx context.Context, req *longrunningpb.CancelOperationRequest, opts ...gax.CallOption) error { - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} - ctx = insertMetadata(ctx, c.xGoogMetadata, md) + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) opts = append((*c.CallOptions).CancelOperation[0:len((*c.CallOptions).CancelOperation):len((*c.CallOptions).CancelOperation)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error @@ -524,7 +527,7 @@ func (c *operationsGRPCClient) CancelOperation(ctx context.Context, req *longrun } func (c *operationsGRPCClient) WaitOperation(ctx context.Context, req *longrunningpb.WaitOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) { - ctx = insertMetadata(ctx, c.xGoogMetadata) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, c.xGoogHeaders...) opts = append((*c.CallOptions).WaitOperation[0:len((*c.CallOptions).WaitOperation):len((*c.CallOptions).WaitOperation)], opts...) var resp *longrunningpb.Operation err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -582,7 +585,8 @@ func (c *operationsRESTClient) ListOperations(ctx context.Context, req *longrunn baseUrl.RawQuery = params.Encode() // Build HTTP headers from client and context metadata. 
- headers := buildHeaders(ctx, c.xGoogMetadata, metadata.Pairs("Content-Type", "application/json")) + hds := append(c.xGoogHeaders, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { if settings.Path != "" { baseUrl.Path = settings.Path @@ -648,9 +652,11 @@ func (c *operationsRESTClient) GetOperation(ctx context.Context, req *longrunnin baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName()) // Build HTTP headers from client and context metadata. - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} - headers := buildHeaders(ctx, c.xGoogMetadata, md, metadata.Pairs("Content-Type", "application/json")) + hds = append(c.xGoogHeaders, hds...) + hds = append(hds, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) opts = append((*c.CallOptions).GetOperation[0:len((*c.CallOptions).GetOperation):len((*c.CallOptions).GetOperation)], opts...) unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} resp := &longrunningpb.Operation{} @@ -704,9 +710,11 @@ func (c *operationsRESTClient) DeleteOperation(ctx context.Context, req *longrun baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName()) // Build HTTP headers from client and context metadata. - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} - headers := buildHeaders(ctx, c.xGoogMetadata, md, metadata.Pairs("Content-Type", "application/json")) + hds = append(c.xGoogHeaders, hds...) + hds = append(hds, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) return gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { if settings.Path != "" { baseUrl.Path = settings.Path @@ -754,9 +762,11 @@ func (c *operationsRESTClient) CancelOperation(ctx context.Context, req *longrun baseUrl.Path += fmt.Sprintf("/v1/%v:cancel", req.GetName()) // Build HTTP headers from client and context metadata. - md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))} - headers := buildHeaders(ctx, c.xGoogMetadata, md, metadata.Pairs("Content-Type", "application/json")) + hds = append(c.xGoogHeaders, hds...) + hds = append(hds, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) return gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { if settings.Path != "" { baseUrl.Path = settings.Path @@ -811,7 +821,8 @@ func (c *operationsRESTClient) WaitOperation(ctx context.Context, req *longrunni baseUrl.RawQuery = params.Encode() // Build HTTP headers from client and context metadata. - headers := buildHeaders(ctx, c.xGoogMetadata, metadata.Pairs("Content-Type", "application/json")) + hds := append(c.xGoogHeaders, "Content-Type", "application/json") + headers := gax.BuildHeaders(ctx, hds...) opts = append((*c.CallOptions).WaitOperation[0:len((*c.CallOptions).WaitOperation):len((*c.CallOptions).WaitOperation)], opts...) 
unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} resp := &longrunningpb.Operation{} diff --git a/vendor/cloud.google.com/go/release-please-config-individual.json b/vendor/cloud.google.com/go/release-please-config-individual.json index b8766e0f2ea78..fd6db8b96122d 100644 --- a/vendor/cloud.google.com/go/release-please-config-individual.json +++ b/vendor/cloud.google.com/go/release-please-config-individual.json @@ -5,6 +5,12 @@ "separate-pull-requests": true, "tag-separator": "/", "packages": { + "auth": { + "component": "auth" + }, + "auth/oauth2adapt": { + "component": "auth/oauth2adapt" + }, "bigquery": { "component": "bigquery" }, @@ -42,4 +48,4 @@ "plugins": [ "sentence-case" ] -} \ No newline at end of file +} diff --git a/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json b/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json index 85a0ffcf59f07..d2bd9b417b30a 100644 --- a/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json +++ b/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json @@ -96,6 +96,9 @@ "confidentialcomputing": { "component": "confidentialcomputing" }, + "config": { + "component": "config" + }, "contactcenterinsights": { "component": "contactcenterinsights" }, @@ -303,6 +306,9 @@ "secretmanager": { "component": "secretmanager" }, + "securesourcemanager": { + "component": "securesourcemanager" + }, "security": { "component": "security" }, @@ -324,6 +330,9 @@ "shell": { "component": "shell" }, + "shopping": { + "component": "shopping" + }, "speech": { "component": "speech" }, diff --git a/vendor/cloud.google.com/go/storage/CHANGES.md b/vendor/cloud.google.com/go/storage/CHANGES.md index b9ec72dc3c0f6..30ee040f76b0f 100644 --- a/vendor/cloud.google.com/go/storage/CHANGES.md +++ b/vendor/cloud.google.com/go/storage/CHANGES.md @@ -1,6 +1,100 @@ # Changes +## [1.35.1](https://github.com/googleapis/google-cloud-go/compare/storage/v1.35.0...storage/v1.35.1) (2023-11-09) + + +### Bug Fixes + +* **storage:** Rename aux.go to auxiliary.go fixing windows build ([ba23673](https://github.com/googleapis/google-cloud-go/commit/ba23673da7707c31292e4aa29d65b7ac1446d4a6)) + +## [1.35.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.34.1...storage/v1.35.0) (2023-11-09) + + +### Features + +* **storage:** Change gRPC writes to use bi-directional streams ([#8930](https://github.com/googleapis/google-cloud-go/issues/8930)) ([3e23a36](https://github.com/googleapis/google-cloud-go/commit/3e23a364b1a20c4fda7aef257e4136586ec769a4)) + +## [1.34.1](https://github.com/googleapis/google-cloud-go/compare/storage/v1.34.0...storage/v1.34.1) (2023-11-01) + + +### Bug Fixes + +* **storage:** Bump google.golang.org/api to v0.149.0 ([8d2ab9f](https://github.com/googleapis/google-cloud-go/commit/8d2ab9f320a86c1c0fab90513fc05861561d0880)) + +## [1.34.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.33.0...storage/v1.34.0) (2023-10-31) + + +### Features + +* **storage/internal:** Add match_glob field to ListObjectsRequest ([#8618](https://github.com/googleapis/google-cloud-go/issues/8618)) ([e9ae601](https://github.com/googleapis/google-cloud-go/commit/e9ae6018983ae09781740e4ff939e6e365863dbb)) +* **storage/internal:** Add terminal_storage_class fields to Autoclass message ([57fc1a6](https://github.com/googleapis/google-cloud-go/commit/57fc1a6de326456eb68ef25f7a305df6636ed386)) +* **storage/internal:** Adds the RestoreObject operation 
([56ce871](https://github.com/googleapis/google-cloud-go/commit/56ce87195320634b07ae0b012efcc5f2b3813fb0)) +* **storage:** Support autoclass v2.1 ([#8721](https://github.com/googleapis/google-cloud-go/issues/8721)) ([fe1e195](https://github.com/googleapis/google-cloud-go/commit/fe1e19590a252c6adc6ca6c51a69b6e561e143b8)) +* **storage:** Support MatchGlob for gRPC ([#8670](https://github.com/googleapis/google-cloud-go/issues/8670)) ([3df0287](https://github.com/googleapis/google-cloud-go/commit/3df0287f88d5e2c4526e9e6b8dc2a4ca54f88918)), refs [#7727](https://github.com/googleapis/google-cloud-go/issues/7727) + + +### Bug Fixes + +* **storage:** Drop stream reference after closing it for gRPC writes ([#8872](https://github.com/googleapis/google-cloud-go/issues/8872)) ([525abde](https://github.com/googleapis/google-cloud-go/commit/525abdee433864d4d456f1f1fff5599017b557ff)) +* **storage:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d)) +* **storage:** Update grpc-go to v1.56.3 ([343cea8](https://github.com/googleapis/google-cloud-go/commit/343cea8c43b1e31ae21ad50ad31d3b0b60143f8c)) +* **storage:** Update grpc-go to v1.59.0 ([81a97b0](https://github.com/googleapis/google-cloud-go/commit/81a97b06cb28b25432e4ece595c55a9857e960b7)) + +## [1.33.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.32.0...storage/v1.33.0) (2023-09-07) + + +### Features + +* **storage:** Export gRPC client constructor ([#8509](https://github.com/googleapis/google-cloud-go/issues/8509)) ([1a928ae](https://github.com/googleapis/google-cloud-go/commit/1a928ae205f2325cb5206304af4d609dc3c1447a)) + +## [1.32.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.31.0...storage/v1.32.0) (2023-08-15) + + +### Features + +* **storage:** Add support for custom headers ([#8294](https://github.com/googleapis/google-cloud-go/issues/8294)) ([313fd4a](https://github.com/googleapis/google-cloud-go/commit/313fd4a60380d36c5ecaead3e968dbc84d044a0b)) +* **storage:** Add trace span to Writer ([#8375](https://github.com/googleapis/google-cloud-go/issues/8375)) ([f7ac85b](https://github.com/googleapis/google-cloud-go/commit/f7ac85bec2806d351529714bd7744a91a9fdefdd)), refs [#6144](https://github.com/googleapis/google-cloud-go/issues/6144) +* **storage:** Support single-shot uploads in gRPC ([#8348](https://github.com/googleapis/google-cloud-go/issues/8348)) ([7de4a7d](https://github.com/googleapis/google-cloud-go/commit/7de4a7da31ab279a343b1592b15a126cda03e5e7)), refs [#7798](https://github.com/googleapis/google-cloud-go/issues/7798) +* **storage:** Trace span covers life of a Reader ([#8390](https://github.com/googleapis/google-cloud-go/issues/8390)) ([8de30d7](https://github.com/googleapis/google-cloud-go/commit/8de30d752eec2fed2ea4c127482d3e213f9050e2)) + + +### Bug Fixes + +* **storage:** Fix AllObjects condition in gRPC ([#8184](https://github.com/googleapis/google-cloud-go/issues/8184)) ([2b99e4f](https://github.com/googleapis/google-cloud-go/commit/2b99e4f39be20fe21e8bc5c1ec1c0e758222c46e)), refs [#6205](https://github.com/googleapis/google-cloud-go/issues/6205) +* **storage:** Fix gRPC generation/condition issues ([#8396](https://github.com/googleapis/google-cloud-go/issues/8396)) ([ca68ff5](https://github.com/googleapis/google-cloud-go/commit/ca68ff54b680732b59b223655070d0f6abccefee)) +* **storage:** Same method name and Trace Span name ([#8150](https://github.com/googleapis/google-cloud-go/issues/8150)) 
([e277213](https://github.com/googleapis/google-cloud-go/commit/e2772133896bb94097b5d1f090f1bcafd136f2ed)) +* **storage:** Update gRPC retry codes ([#8202](https://github.com/googleapis/google-cloud-go/issues/8202)) ([afdf772](https://github.com/googleapis/google-cloud-go/commit/afdf772fc6a90b3010eee9d70ab65e22e276f53f)) + +## [1.31.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.30.1...storage/v1.31.0) (2023-06-27) + + +### Features + +* **storage/internal:** Add ctype=CORD for ChecksummedData.content ([ca94e27](https://github.com/googleapis/google-cloud-go/commit/ca94e2724f9e2610b46aefd0a3b5ddc06102e91b)) +* **storage:** Add support for MatchGlob ([#8097](https://github.com/googleapis/google-cloud-go/issues/8097)) ([9426a5a](https://github.com/googleapis/google-cloud-go/commit/9426a5a45d4c2fd07f84261f6d602680e79cdc48)), refs [#7727](https://github.com/googleapis/google-cloud-go/issues/7727) [#7728](https://github.com/googleapis/google-cloud-go/issues/7728) +* **storage:** Respect WithEndpoint for SignedURLs and PostPolicy ([#8113](https://github.com/googleapis/google-cloud-go/issues/8113)) ([f918f23](https://github.com/googleapis/google-cloud-go/commit/f918f23a3cda4fbc8d709e32b914ead8b735d664)) +* **storage:** Update all direct dependencies ([b340d03](https://github.com/googleapis/google-cloud-go/commit/b340d030f2b52a4ce48846ce63984b28583abde6)) + + +### Bug Fixes + +* **storage:** Fix CreateBucket logic for gRPC ([#8165](https://github.com/googleapis/google-cloud-go/issues/8165)) ([8424e7e](https://github.com/googleapis/google-cloud-go/commit/8424e7e145a117c91006318fa924a8b2643c1c7e)), refs [#8162](https://github.com/googleapis/google-cloud-go/issues/8162) +* **storage:** Fix reads with "./" in object names [XML] ([#8017](https://github.com/googleapis/google-cloud-go/issues/8017)) ([6b7b21f](https://github.com/googleapis/google-cloud-go/commit/6b7b21f8a334b6ad3a25e1f66ae1265b4d1f0995)) +* **storage:** Fix routing header for writes ([#8159](https://github.com/googleapis/google-cloud-go/issues/8159)) ([42a59f5](https://github.com/googleapis/google-cloud-go/commit/42a59f5a23ab9b4743ab032ad92304922c801d93)), refs [#8142](https://github.com/googleapis/google-cloud-go/issues/8142) [#8143](https://github.com/googleapis/google-cloud-go/issues/8143) [#8144](https://github.com/googleapis/google-cloud-go/issues/8144) [#8145](https://github.com/googleapis/google-cloud-go/issues/8145) [#8149](https://github.com/googleapis/google-cloud-go/issues/8149) +* **storage:** REST query UpdateMask bug ([df52820](https://github.com/googleapis/google-cloud-go/commit/df52820b0e7721954809a8aa8700b93c5662dc9b)) +* **storage:** Update grpc to v1.55.0 ([1147ce0](https://github.com/googleapis/google-cloud-go/commit/1147ce02a990276ca4f8ab7a1ab65c14da4450ef)) + + +### Documentation + +* **storage/internal:** Clarifications about behavior of DeleteObject RPC ([3f1ed9c](https://github.com/googleapis/google-cloud-go/commit/3f1ed9c63fb115f47607a3ab478842fe5ba0df11)) +* **storage/internal:** Clarified the behavior of supplying bucket.name field in CreateBucket to reflect actual implementation ([ebae64d](https://github.com/googleapis/google-cloud-go/commit/ebae64d53397ec5dfe851f098754eaa1f5df7cb1)) +* **storage/internal:** Revert ChecksummedData message definition not to specify ctype=CORD, because it would be a breaking change. 
([ef61e47](https://github.com/googleapis/google-cloud-go/commit/ef61e4799280a355b960da8ae240ceb2efbe71ac)) +* **storage/internal:** Update routing annotations for CancelResumableWriteRequest and QueryWriteStatusRequest ([4900851](https://github.com/googleapis/google-cloud-go/commit/49008518e168fe6f7891b907d6fc14eecdef758c)) +* **storage/internal:** Updated ChecksummedData message definition to specify ctype=CORD, and removed incorrect earlier attempt that set that annotation in the ReadObjectResponse message definition ([ef61e47](https://github.com/googleapis/google-cloud-go/commit/ef61e4799280a355b960da8ae240ceb2efbe71ac)) +* **storage:** WithXMLReads should mention XML instead of JSON API ([#7881](https://github.com/googleapis/google-cloud-go/issues/7881)) ([36f56c8](https://github.com/googleapis/google-cloud-go/commit/36f56c80c456ca74ffc03df76844ce15980ced82)) + ## [1.30.1](https://github.com/googleapis/google-cloud-go/compare/storage/v1.30.0...storage/v1.30.1) (2023-03-21) diff --git a/vendor/cloud.google.com/go/storage/acl.go b/vendor/cloud.google.com/go/storage/acl.go index e0ab60073c2f4..74799e55e981f 100644 --- a/vendor/cloud.google.com/go/storage/acl.go +++ b/vendor/cloud.google.com/go/storage/acl.go @@ -20,7 +20,7 @@ import ( "reflect" "cloud.google.com/go/internal/trace" - storagepb "cloud.google.com/go/storage/internal/apiv2/stubs" + "cloud.google.com/go/storage/internal/apiv2/storagepb" raw "google.golang.org/api/storage/v1" ) diff --git a/vendor/cloud.google.com/go/storage/bucket.go b/vendor/cloud.google.com/go/storage/bucket.go index 19f266ef1e35b..3818c4498c7cd 100644 --- a/vendor/cloud.google.com/go/storage/bucket.go +++ b/vendor/cloud.google.com/go/storage/bucket.go @@ -27,7 +27,7 @@ import ( "cloud.google.com/go/compute/metadata" "cloud.google.com/go/internal/optional" "cloud.google.com/go/internal/trace" - storagepb "cloud.google.com/go/storage/internal/apiv2/stubs" + "cloud.google.com/go/storage/internal/apiv2/storagepb" "google.golang.org/api/googleapi" "google.golang.org/api/iamcredentials/v1" "google.golang.org/api/iterator" @@ -152,7 +152,7 @@ func (b *BucketHandle) Attrs(ctx context.Context) (attrs *BucketAttrs, err error // Update updates a bucket's attributes. func (b *BucketHandle) Update(ctx context.Context, uattrs BucketAttrsToUpdate) (attrs *BucketAttrs, err error) { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Create") + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Update") defer func() { trace.EndSpan(ctx, err) }() isIdempotent := b.conds != nil && b.conds.MetagenerationMatch != 0 @@ -173,12 +173,18 @@ func (b *BucketHandle) Update(ctx context.Context, uattrs BucketAttrsToUpdate) ( // [Overview of access control]: https://cloud.google.com/storage/docs/accesscontrol#signed_urls_query_string_authentication // [automatic detection of credentials]: https://pkg.go.dev/cloud.google.com/go/storage#hdr-Credential_requirements_for_signing func (b *BucketHandle) SignedURL(object string, opts *SignedURLOptions) (string, error) { - if opts.GoogleAccessID != "" && (opts.SignBytes != nil || len(opts.PrivateKey) > 0) { - return SignedURL(b.name, object, opts) - } // Make a copy of opts so we don't modify the pointer parameter. 
newopts := opts.clone() + if newopts.Hostname == "" { + // Extract the correct host from the readhost set on the client + newopts.Hostname = b.c.xmlHost + } + + if opts.GoogleAccessID != "" && (opts.SignBytes != nil || len(opts.PrivateKey) > 0) { + return SignedURL(b.name, object, newopts) + } + if newopts.GoogleAccessID == "" { id, err := b.detectDefaultGoogleAccessID() if err != nil { @@ -215,12 +221,18 @@ func (b *BucketHandle) SignedURL(object string, opts *SignedURLOptions) (string, // // [automatic detection of credentials]: https://pkg.go.dev/cloud.google.com/go/storage#hdr-Credential_requirements_for_signing func (b *BucketHandle) GenerateSignedPostPolicyV4(object string, opts *PostPolicyV4Options) (*PostPolicyV4, error) { - if opts.GoogleAccessID != "" && (opts.SignRawBytes != nil || opts.SignBytes != nil || len(opts.PrivateKey) > 0) { - return GenerateSignedPostPolicyV4(b.name, object, opts) - } // Make a copy of opts so we don't modify the pointer parameter. newopts := opts.clone() + if newopts.Hostname == "" { + // Extract the correct host from the readhost set on the client + newopts.Hostname = b.c.xmlHost + } + + if opts.GoogleAccessID != "" && (opts.SignRawBytes != nil || opts.SignBytes != nil || len(opts.PrivateKey) > 0) { + return GenerateSignedPostPolicyV4(b.name, object, newopts) + } + if newopts.GoogleAccessID == "" { id, err := b.detectDefaultGoogleAccessID() if err != nil { @@ -728,6 +740,13 @@ type Autoclass struct { // If Autoclass is enabled when the bucket is created, the ToggleTime // is set to the bucket creation time. This field is read-only. ToggleTime time.Time + // TerminalStorageClass: The storage class that objects in the bucket + // eventually transition to if they are not read for a certain length of + // time. Valid values are NEARLINE and ARCHIVE. + TerminalStorageClass string + // TerminalStorageClassUpdateTime represents the time of the most recent + // update to "TerminalStorageClass". + TerminalStorageClassUpdateTime time.Time } func newBucket(b *raw.Bucket) (*BucketAttrs, error) { @@ -921,8 +940,6 @@ func (ua *BucketAttrsToUpdate) toProtoBucket() *storagepb.Bucket { return &storagepb.Bucket{} } - // TODO(cathyo): Handle labels. Pending b/230510191. - var v *storagepb.Bucket_Versioning if ua.VersioningEnabled != nil { v = &storagepb.Bucket_Versioning{Enabled: optional.ToBool(ua.VersioningEnabled)} @@ -996,6 +1013,7 @@ func (ua *BucketAttrsToUpdate) toProtoBucket() *storagepb.Bucket { IamConfig: bktIAM, Rpo: ua.RPO.String(), Autoclass: ua.Autoclass.toProtoAutoclass(), + Labels: ua.setLabels, } } @@ -1230,9 +1248,11 @@ func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket { } if ua.Autoclass != nil { rb.Autoclass = &raw.BucketAutoclass{ - Enabled: ua.Autoclass.Enabled, - ForceSendFields: []string{"Enabled"}, + Enabled: ua.Autoclass.Enabled, + TerminalStorageClass: ua.Autoclass.TerminalStorageClass, + ForceSendFields: []string{"Enabled"}, } + rb.ForceSendFields = append(rb.ForceSendFields, "Autoclass") } if ua.PredefinedACL != "" { // Clear ACL or the call will fail. @@ -1264,7 +1284,9 @@ func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket { } // If returns a new BucketHandle that applies a set of preconditions. -// Preconditions already set on the BucketHandle are ignored. +// Preconditions already set on the BucketHandle are ignored. The supplied +// BucketConditions must have exactly one field set to a non-zero value; +// otherwise an error will be returned from any operation on the BucketHandle. 
// Operations on the new handle will return an error if the preconditions are not // satisfied. The only valid preconditions for buckets are MetagenerationMatch // and MetagenerationNotMatch. @@ -1545,7 +1567,6 @@ func toProtoLifecycle(l Lifecycle) *storagepb.Bucket_Lifecycle { // doc states "format: int32"), so the client types used int64, // but the proto uses int32 so we have a potentially lossy // conversion. - AgeDays: proto.Int32(int32(r.Condition.AgeInDays)), DaysSinceCustomTime: proto.Int32(int32(r.Condition.DaysSinceCustomTime)), DaysSinceNoncurrentTime: proto.Int32(int32(r.Condition.DaysSinceNoncurrentTime)), MatchesPrefix: r.Condition.MatchesPrefix, @@ -1555,7 +1576,11 @@ func toProtoLifecycle(l Lifecycle) *storagepb.Bucket_Lifecycle { }, } - // TODO(#6205): This may not be needed for gRPC + // Only set AgeDays in the proto if it is non-zero, or if the user has set + // Condition.AllObjects. + if r.Condition.AgeInDays != 0 { + rr.Condition.AgeDays = proto.Int32(int32(r.Condition.AgeInDays)) + } if r.Condition.AllObjects { rr.Condition.AgeDays = proto.Int32(0) } @@ -1654,8 +1679,8 @@ func toLifecycleFromProto(rl *storagepb.Bucket_Lifecycle) Lifecycle { }, } - // TODO(#6205): This may not be needed for gRPC - if rr.GetCondition().GetAgeDays() == 0 { + // Only set Condition.AllObjects if AgeDays is zero, not if it is nil. + if rr.GetCondition().AgeDays != nil && rr.GetCondition().GetAgeDays() == 0 { r.Condition.AllObjects = true } @@ -1938,9 +1963,10 @@ func (a *Autoclass) toRawAutoclass() *raw.BucketAutoclass { if a == nil { return nil } - // Excluding read only field ToggleTime. + // Excluding read only fields ToggleTime and TerminalStorageClassUpdateTime. return &raw.BucketAutoclass{ - Enabled: a.Enabled, + Enabled: a.Enabled, + TerminalStorageClass: a.TerminalStorageClass, } } @@ -1948,27 +1974,34 @@ func (a *Autoclass) toProtoAutoclass() *storagepb.Bucket_Autoclass { if a == nil { return nil } - // Excluding read only field ToggleTime. - return &storagepb.Bucket_Autoclass{ + // Excluding read only fields ToggleTime and TerminalStorageClassUpdateTime. + ba := &storagepb.Bucket_Autoclass{ Enabled: a.Enabled, } + if a.TerminalStorageClass != "" { + ba.TerminalStorageClass = &a.TerminalStorageClass + } + return ba } func toAutoclassFromRaw(a *raw.BucketAutoclass) *Autoclass { if a == nil || a.ToggleTime == "" { return nil } - // Return Autoclass.ToggleTime only if parsed with a valid value. + ac := &Autoclass{ + Enabled: a.Enabled, + TerminalStorageClass: a.TerminalStorageClass, + } + // Return ToggleTime and TSCUpdateTime only if parsed with valid values. 
t, err := time.Parse(time.RFC3339, a.ToggleTime) - if err != nil { - return &Autoclass{ - Enabled: a.Enabled, - } + if err == nil { + ac.ToggleTime = t } - return &Autoclass{ - Enabled: a.Enabled, - ToggleTime: t, + ut, err := time.Parse(time.RFC3339, a.TerminalStorageClassUpdateTime) + if err == nil { + ac.TerminalStorageClassUpdateTime = ut } + return ac } func toAutoclassFromProto(a *storagepb.Bucket_Autoclass) *Autoclass { @@ -1976,8 +2009,10 @@ func toAutoclassFromProto(a *storagepb.Bucket_Autoclass) *Autoclass { return nil } return &Autoclass{ - Enabled: a.GetEnabled(), - ToggleTime: a.GetToggleTime().AsTime(), + Enabled: a.GetEnabled(), + ToggleTime: a.GetToggleTime().AsTime(), + TerminalStorageClass: a.GetTerminalStorageClass(), + TerminalStorageClassUpdateTime: a.GetTerminalStorageClassUpdateTime().AsTime(), } } diff --git a/vendor/cloud.google.com/go/storage/doc.go b/vendor/cloud.google.com/go/storage/doc.go index cde7cfdba9328..22adb744fe4f8 100644 --- a/vendor/cloud.google.com/go/storage/doc.go +++ b/vendor/cloud.google.com/go/storage/doc.go @@ -320,6 +320,44 @@ client (using [Client.SetRetry]). For example: // Handle err. } +# Sending Custom Headers + +You can add custom headers to any API call made by this package by using +[callctx.SetHeaders] on the context which is passed to the method. For example, +to add a [custom audit logging] header: + + ctx := context.Background() + ctx = callctx.SetHeaders(ctx, "x-goog-custom-audit-", "") + // Use client as usual with the context and the additional headers will be sent. + client.Bucket("my-bucket").Attrs(ctx) + +# Experimental gRPC API + +This package includes support for the Cloud Storage gRPC API, which is currently +in preview. This implementation uses gRPC rather than the current JSON & XML +APIs to make requests to Cloud Storage. If you would like to try the API, +please contact your GCP account rep for more information. The gRPC API is not +yet generally available, so it may be subject to breaking changes. + +To create a client which will use gRPC, use the alternate constructor: + + ctx := context.Background() + client, err := storage.NewGRPCClient(ctx) + if err != nil { + // TODO: Handle error. + } + // Use client as usual. + +If the application is running within GCP, users may get better performance by +enabling DirectPath (enabling requests to skip some proxy steps). To enable, +set the environment variable `GOOGLE_CLOUD_ENABLE_DIRECT_PATH_XDS=true` and add +the following side-effect imports to your application: + + import ( + _ "google.golang.org/grpc/balancer/rls" + _ "google.golang.org/grpc/xds/googledirectpath" + ) + [Cloud Storage IAM docs]: https://cloud.google.com/storage/docs/access-control/iam [XML POST Object docs]: https://cloud.google.com/storage/docs/xml-api/post-object [Cloud Storage retry docs]: https://cloud.google.com/storage/docs/retry-strategy @@ -327,5 +365,6 @@ client (using [Client.SetRetry]). 
For example: [gcloud using application default credentials]: https://cloud.google.com/sdk/gcloud/reference/auth/application-default/login [impersonation enabled]: https://cloud.google.com/sdk/gcloud/reference#--impersonate-service-account [IAM Service Account Credentials API]: https://console.developers.google.com/apis/api/iamcredentials.googleapis.com/overview +[custom audit logging]: https://cloud.google.com/storage/docs/audit-logging#add-custom-metadata */ package storage // import "cloud.google.com/go/storage" diff --git a/vendor/cloud.google.com/go/storage/grpc_client.go b/vendor/cloud.google.com/go/storage/grpc_client.go index f77afe317bc0a..99dfba46770f2 100644 --- a/vendor/cloud.google.com/go/storage/grpc_client.go +++ b/vendor/cloud.google.com/go/storage/grpc_client.go @@ -26,8 +26,9 @@ import ( "cloud.google.com/go/iam/apiv1/iampb" "cloud.google.com/go/internal/trace" gapic "cloud.google.com/go/storage/internal/apiv2" - storagepb "cloud.google.com/go/storage/internal/apiv2/stubs" + "cloud.google.com/go/storage/internal/apiv2/storagepb" "github.com/googleapis/gax-go/v2" + "google.golang.org/api/googleapi" "google.golang.org/api/iterator" "google.golang.org/api/option" "google.golang.org/api/option/internaloption" @@ -39,13 +40,14 @@ import ( ) const ( - // defaultConnPoolSize is the default number of connections + // defaultConnPoolSize is the default number of channels // to initialize in the GAPIC gRPC connection pool. A larger // connection pool may be necessary for jobs that require - // high throughput and/or leverage many concurrent streams. + // high throughput and/or leverage many concurrent streams + // if not running via DirectPath. // // This is only used for the gRPC client. - defaultConnPoolSize = 4 + defaultConnPoolSize = 1 // maxPerMessageWriteSize is the maximum amount of content that can be sent // per WriteObjectRequest message. A buffer reaching this amount will @@ -139,11 +141,11 @@ func (c *grpcStorageClient) GetServiceAccount(ctx context.Context, project strin Project: toProjectResource(project), } var resp *storagepb.ServiceAccount - err := run(ctx, func() error { + err := run(ctx, func(ctx context.Context) error { var err error resp, err = c.raw.GetServiceAccount(ctx, req, s.gax...) return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) if err != nil { return "", err } @@ -153,7 +155,7 @@ func (c *grpcStorageClient) GetServiceAccount(ctx context.Context, project strin func (c *grpcStorageClient) CreateBucket(ctx context.Context, project, bucket string, attrs *BucketAttrs, opts ...storageOption) (*BucketAttrs, error) { s := callSettings(c.settings, opts...) b := attrs.toProtoBucket() - b.Name = bucket + b.Project = toProjectResource(project) // If there is lifecycle information but no location, explicitly set // the location. This is a GCS quirk/bug. if b.GetLocation() == "" && b.GetLifecycle() != nil { @@ -161,9 +163,9 @@ func (c *grpcStorageClient) CreateBucket(ctx context.Context, project, bucket st } req := &storagepb.CreateBucketRequest{ - Parent: toProjectResource(project), + Parent: fmt.Sprintf("projects/%s", globalProjectAlias), Bucket: b, - BucketId: b.GetName(), + BucketId: bucket, } if attrs != nil { req.PredefinedAcl = attrs.PredefinedACL @@ -171,13 +173,13 @@ func (c *grpcStorageClient) CreateBucket(ctx context.Context, project, bucket st } var battrs *BucketAttrs - err := run(ctx, func() error { + err := run(ctx, func(ctx context.Context) error { res, err := c.raw.CreateBucket(ctx, req, s.gax...) 
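A recurring change in this file is that the closure passed to run now receives a context, replacing the separate setRetryHeaderGRPC/setRetryHeaderHTTP argument, so each attempt can execute against a freshly decorated per-attempt context. The sketch below shows the general shape of such a retry helper; it is illustrative only and is not the library's internal run implementation (names and backoff are made up):

    import (
        "context"
        "time"
    )

    // runWithRetry calls fn until it succeeds, the error is not retryable, or the
    // attempts are exhausted. fn receives the context so a real implementation can
    // attach per-attempt metadata (e.g. retry-attempt headers) before each call.
    func runWithRetry(ctx context.Context, fn func(ctx context.Context) error, attempts int, retryable func(error) bool) error {
        var err error
        for i := 0; i < attempts; i++ {
            if err = fn(ctx); err == nil || !retryable(err) {
                return err
            }
            select {
            case <-ctx.Done():
                return ctx.Err()
            case <-time.After(time.Duration(i+1) * 100 * time.Millisecond):
            }
        }
        return err
    }
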
battrs = newBucketFromProto(res) return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) return battrs, err } @@ -191,26 +193,26 @@ func (c *grpcStorageClient) ListBuckets(ctx context.Context, project string, opt var gitr *gapic.BucketIterator fetch := func(pageSize int, pageToken string) (token string, err error) { - // Initialize GAPIC-based iterator when pageToken is empty, which - // indicates that this fetch call is attempting to get the first page. - // - // Note: Initializing the GAPIC-based iterator lazily is necessary to - // capture the BucketIterator.Prefix set by the user *after* the - // BucketIterator is returned to them from the veneer. - if pageToken == "" { - req := &storagepb.ListBucketsRequest{ - Parent: toProjectResource(it.projectID), - Prefix: it.Prefix, - } - gitr = c.raw.ListBuckets(it.ctx, req, s.gax...) - } var buckets []*storagepb.Bucket var next string - err = run(it.ctx, func() error { + err = run(it.ctx, func(ctx context.Context) error { + // Initialize GAPIC-based iterator when pageToken is empty, which + // indicates that this fetch call is attempting to get the first page. + // + // Note: Initializing the GAPIC-based iterator lazily is necessary to + // capture the BucketIterator.Prefix set by the user *after* the + // BucketIterator is returned to them from the veneer. + if pageToken == "" { + req := &storagepb.ListBucketsRequest{ + Parent: toProjectResource(it.projectID), + Prefix: it.Prefix, + } + gitr = c.raw.ListBuckets(ctx, req, s.gax...) + } buckets, next, err = gitr.InternalFetch(pageSize, pageToken) return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) if err != nil { return "", err } @@ -244,9 +246,9 @@ func (c *grpcStorageClient) DeleteBucket(ctx context.Context, bucket string, con ctx = setUserProjectMetadata(ctx, s.userProject) } - return run(ctx, func() error { + return run(ctx, func(ctx context.Context) error { return c.raw.DeleteBucket(ctx, req, s.gax...) - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) } func (c *grpcStorageClient) GetBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error) { @@ -263,13 +265,13 @@ func (c *grpcStorageClient) GetBucket(ctx context.Context, bucket string, conds } var battrs *BucketAttrs - err := run(ctx, func() error { + err := run(ctx, func(ctx context.Context) error { res, err := c.raw.GetBucket(ctx, req, s.gax...) battrs = newBucketFromProto(res) return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound { return nil, ErrBucketNotExist @@ -354,15 +356,24 @@ func (c *grpcStorageClient) UpdateBucket(ctx context.Context, bucket string, uat if uattrs.Autoclass != nil { fieldMask.Paths = append(fieldMask.Paths, "autoclass") } - // TODO(cathyo): Handle labels. Pending b/230510191. + + for label := range uattrs.setLabels { + fieldMask.Paths = append(fieldMask.Paths, fmt.Sprintf("labels.%s", label)) + } + + // Delete a label by not including it in Bucket.Labels but adding the key to the update mask. + for label := range uattrs.deleteLabels { + fieldMask.Paths = append(fieldMask.Paths, fmt.Sprintf("labels.%s", label)) + } + req.UpdateMask = fieldMask var battrs *BucketAttrs - err := run(ctx, func() error { + err := run(ctx, func(ctx context.Context) error { res, err := c.raw.UpdateBucket(ctx, req, s.gax...) 
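The UpdateBucket hunk above replaces the labels TODO with per-label update-mask paths: a label is set by naming labels.<key> in the mask and including the value in the bucket message, and deleted by naming the same path while omitting the key from the message. A small sketch of that mask construction (the map types are assumptions, not copied from the patch):

    import (
        "fmt"

        "google.golang.org/protobuf/types/known/fieldmaskpb"
    )

    // labelUpdateMask builds field mask paths for labels that are being set or
    // deleted; both cases use the same "labels.<key>" path.
    func labelUpdateMask(setLabels map[string]string, deleteLabels map[string]bool) *fieldmaskpb.FieldMask {
        mask := &fieldmaskpb.FieldMask{}
        for k := range setLabels {
            mask.Paths = append(mask.Paths, fmt.Sprintf("labels.%s", k))
        }
        for k := range deleteLabels {
            mask.Paths = append(mask.Paths, fmt.Sprintf("labels.%s", k))
        }
        return mask
    }
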
battrs = newBucketFromProto(res) return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) return battrs, err } @@ -375,10 +386,10 @@ func (c *grpcStorageClient) LockBucketRetentionPolicy(ctx context.Context, bucke return err } - return run(ctx, func() error { + return run(ctx, func(ctx context.Context) error { _, err := c.raw.LockBucketRetentionPolicy(ctx, req, s.gax...) return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) } func (c *grpcStorageClient) ListObjects(ctx context.Context, bucket string, q *Query, opts ...storageOption) *ObjectIterator { @@ -397,18 +408,21 @@ func (c *grpcStorageClient) ListObjects(ctx context.Context, bucket string, q *Q LexicographicStart: it.query.StartOffset, LexicographicEnd: it.query.EndOffset, IncludeTrailingDelimiter: it.query.IncludeTrailingDelimiter, + MatchGlob: it.query.MatchGlob, ReadMask: q.toFieldMask(), // a nil Query still results in a "*" FieldMask } if s.userProject != "" { ctx = setUserProjectMetadata(ctx, s.userProject) } - gitr := c.raw.ListObjects(it.ctx, req, s.gax...) fetch := func(pageSize int, pageToken string) (token string, err error) { var objects []*storagepb.Object - err = run(it.ctx, func() error { + var gitr *gapic.ObjectIterator + err = run(it.ctx, func(ctx context.Context) error { + gitr = c.raw.ListObjects(ctx, req, s.gax...) + it.ctx = ctx objects, token, err = gitr.InternalFetch(pageSize, pageToken) return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) if err != nil { if st, ok := status.FromError(err); ok && st.Code() == codes.NotFound { err = ErrBucketNotExist @@ -451,9 +465,9 @@ func (c *grpcStorageClient) DeleteObject(ctx context.Context, bucket, object str if s.userProject != "" { ctx = setUserProjectMetadata(ctx, s.userProject) } - err := run(ctx, func() error { + err := run(ctx, func(ctx context.Context) error { return c.raw.DeleteObject(ctx, req, s.gax...) - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound { return ErrObjectNotExist } @@ -479,12 +493,12 @@ func (c *grpcStorageClient) GetObject(ctx context.Context, bucket, object string } var attrs *ObjectAttrs - err := run(ctx, func() error { + err := run(ctx, func(ctx context.Context) error { res, err := c.raw.GetObject(ctx, req, s.gax...) attrs = newObjectFromProto(res) return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound { return nil, ErrObjectNotExist @@ -496,11 +510,15 @@ func (c *grpcStorageClient) GetObject(ctx context.Context, bucket, object string func (c *grpcStorageClient) UpdateObject(ctx context.Context, bucket, object string, uattrs *ObjectAttrsToUpdate, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) { s := callSettings(c.settings, opts...) o := uattrs.toProtoObject(bucketResourceName(globalProjectAlias, bucket), object) + // For Update, generation is passed via the object message rather than a field on the request. 
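The ListObjects request above (and the JSON path later in this patch) gains a MatchGlob field forwarded from the public Query type. A hedged usage sketch, assuming a client version where Query.MatchGlob is exposed; the bucket name and glob pattern are placeholders:

    import (
        "context"
        "fmt"

        "cloud.google.com/go/storage"
        "google.golang.org/api/iterator"
    )

    // listJSONObjects lists only objects whose names match the glob pattern.
    func listJSONObjects(ctx context.Context, client *storage.Client, bucket string) error {
        it := client.Bucket(bucket).Objects(ctx, &storage.Query{MatchGlob: "**/*.json"})
        for {
            attrs, err := it.Next()
            if err == iterator.Done {
                return nil
            }
            if err != nil {
                return err
            }
            fmt.Println(attrs.Name)
        }
    }
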
+ if gen >= 0 { + o.Generation = gen + } req := &storagepb.UpdateObjectRequest{ Object: o, PredefinedAcl: uattrs.PredefinedACL, } - if err := applyCondsProto("grpcStorageClient.UpdateObject", gen, conds, req); err != nil { + if err := applyCondsProto("grpcStorageClient.UpdateObject", defaultGen, conds, req); err != nil { return nil, err } if s.userProject != "" { @@ -540,16 +558,28 @@ func (c *grpcStorageClient) UpdateObject(ctx context.Context, bucket, object str if uattrs.ACL != nil || len(uattrs.PredefinedACL) > 0 { fieldMask.Paths = append(fieldMask.Paths, "acl") } - // TODO(cathyo): Handle metadata. Pending b/230510191. + + if uattrs.Metadata != nil { + // We don't support deleting a specific metadata key; metadata is deleted + // as a whole if provided an empty map, so we do not use dot notation here + if len(uattrs.Metadata) == 0 { + fieldMask.Paths = append(fieldMask.Paths, "metadata") + } else { + // We can, however, use dot notation for adding keys + for key := range uattrs.Metadata { + fieldMask.Paths = append(fieldMask.Paths, fmt.Sprintf("metadata.%s", key)) + } + } + } req.UpdateMask = fieldMask var attrs *ObjectAttrs - err := run(ctx, func() error { + err := run(ctx, func(ctx context.Context) error { res, err := c.raw.UpdateObject(ctx, req, s.gax...) attrs = newObjectFromProto(res) return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) if e, ok := status.FromError(err); ok && e.Code() == codes.NotFound { return nil, ErrObjectNotExist } @@ -755,17 +785,18 @@ func (c *grpcStorageClient) ComposeObject(ctx context.Context, req *composeObjec dstObjPb := req.dstObject.attrs.toProtoObject(req.dstBucket) dstObjPb.Name = req.dstObject.name - if err := applyCondsProto("ComposeObject destination", defaultGen, req.dstObject.conds, dstObjPb); err != nil { - return nil, err - } + if req.sendCRC32C { dstObjPb.Checksums.Crc32C = &req.dstObject.attrs.CRC32C } srcs := []*storagepb.ComposeObjectRequest_SourceObject{} for _, src := range req.srcs { - srcObjPb := &storagepb.ComposeObjectRequest_SourceObject{Name: src.name} - if err := applyCondsProto("ComposeObject source", src.gen, src.conds, srcObjPb); err != nil { + srcObjPb := &storagepb.ComposeObjectRequest_SourceObject{Name: src.name, ObjectPreconditions: &storagepb.ComposeObjectRequest_SourceObject_ObjectPreconditions{}} + if src.gen >= 0 { + srcObjPb.Generation = src.gen + } + if err := applyCondsProto("ComposeObject source", defaultGen, src.conds, srcObjPb.ObjectPreconditions); err != nil { return nil, err } srcs = append(srcs, srcObjPb) @@ -775,6 +806,9 @@ func (c *grpcStorageClient) ComposeObject(ctx context.Context, req *composeObjec Destination: dstObjPb, SourceObjects: srcs, } + if err := applyCondsProto("ComposeObject destination", defaultGen, req.dstObject.conds, rawReq); err != nil { + return nil, err + } if req.predefinedACL != "" { rawReq.DestinationPredefinedAcl = req.predefinedACL } @@ -784,10 +818,10 @@ func (c *grpcStorageClient) ComposeObject(ctx context.Context, req *composeObjec var obj *storagepb.Object var err error - if err := run(ctx, func() error { + if err := run(ctx, func(ctx context.Context) error { obj, err = c.raw.ComposeObject(ctx, rawReq, s.gax...) 
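Per the comment in the UpdateObject hunk above, gRPC metadata updates use dot notation per key when adding or changing entries and the bare "metadata" path to clear everything; individual keys cannot be deleted. From the caller's side this matches the existing public semantics, sketched here (the object handle and metadata key are placeholders):

    import (
        "context"

        "cloud.google.com/go/storage"
    )

    // updateMetadata shows the two supported shapes: a populated map updates only
    // the listed keys, while a non-nil empty map deletes all custom metadata.
    // Leaving Metadata nil would leave metadata untouched.
    func updateMetadata(ctx context.Context, obj *storage.ObjectHandle, clear bool) error {
        md := map[string]string{"reviewed": "true"}
        if clear {
            md = map[string]string{}
        }
        _, err := obj.Update(ctx, storage.ObjectAttrsToUpdate{Metadata: md})
        return err
    }
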
return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)); err != nil { + }, s.retry, s.idempotent); err != nil { return nil, err } @@ -834,9 +868,9 @@ func (c *grpcStorageClient) RewriteObject(ctx context.Context, req *rewriteObjec var res *storagepb.RewriteResponse var err error - retryCall := func() error { res, err = c.raw.RewriteObject(ctx, call, s.gax...); return err } + retryCall := func(ctx context.Context) error { res, err = c.raw.RewriteObject(ctx, call, s.gax...); return err } - if err := run(ctx, retryCall, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)); err != nil { + if err := run(ctx, retryCall, s.retry, s.idempotent); err != nil { return nil, err } @@ -885,15 +919,8 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange req.ReadOffset = params.offset + seen - // A negative length means "read to the end of the object", but the - // read_limit field it corresponds to uses zero to mean the same thing. Thus - // we coerce the length to 0 to read to the end of the object. - if params.length < 0 { - params.length = 0 - } - - // Only set a ReadLimit if length is greater than zero, because zero - // means read it all. + // Only set a ReadLimit if length is greater than zero, because <= 0 means + // to read it all. if params.length > 0 { req.ReadLimit = params.length - seen } @@ -907,7 +934,7 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange var msg *storagepb.ReadObjectResponse var err error - err = run(cc, func() error { + err = run(cc, func(ctx context.Context) error { stream, err = c.raw.ReadObject(cc, req, s.gax...) if err != nil { return err @@ -921,7 +948,7 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange } return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) if err != nil { // Close the stream context we just created to ensure we don't leak // resources. @@ -963,6 +990,7 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange // client buffer for reading later. leftovers: msg.GetChecksummedData().GetContent(), settings: s, + zeroRange: params.length == 0, }, } @@ -974,8 +1002,15 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange r.remain = size } + // For a zero-length request, explicitly close the stream and set remaining + // bytes to zero. + if params.length == 0 { + r.remain = 0 + r.reader.Close() + } + // Only support checksums when reading an entire object, not a range. - if checksums := msg.GetObjectChecksums(); checksums != nil && checksums.Crc32C != nil && params.offset == 0 && params.length == 0 { + if checksums := msg.GetObjectChecksums(); checksums != nil && checksums.Crc32C != nil && params.offset == 0 && params.length < 0 { r.wantCRC = checksums.GetCrc32C() r.checkCRC = true } @@ -1015,11 +1050,13 @@ func (c *grpcStorageClient) OpenWriter(params *openWriterParams, opts ...storage } // The chunk buffer is full, but there is no end in sight. This - // means that a resumable upload will need to be used to send + // means that either: + // 1. A resumable upload will need to be used to send // multiple chunks, until we are done reading data. Start a // resumable upload if it has not already been started. - // Otherwise, all data will be sent over a single gRPC stream. - if !doneReading && gw.upid == "" { + // 2. ChunkSize of zero may also have a full buffer, but a resumable + // session should not be initiated in this case. 
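The OpenWriter change above means a ChunkSize of zero no longer triggers a resumable session even when the buffer fills; as the writer code later in this hunk notes, gRPC still needs a minimum-sized buffer because a truly bufferless upload is not possible there. A hedged caller-side sketch of a single-shot upload (bucket and object names are placeholders):

    import (
        "context"
        "io"
        "strings"

        "cloud.google.com/go/storage"
    )

    // uploadSingleShot writes a small object with ChunkSize set to 0 so that no
    // resumable upload session is started.
    func uploadSingleShot(ctx context.Context, client *storage.Client, bucket, object, data string) error {
        w := client.Bucket(bucket).Object(object).NewWriter(ctx)
        w.ChunkSize = 0
        if _, err := io.Copy(w, strings.NewReader(data)); err != nil {
            w.Close()
            return err
        }
        return w.Close() // Close commits the upload and reports any error
    }
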
+ if !doneReading && gw.upid == "" && params.chunkSize != 0 { err = gw.startResumableUpload() if err != nil { err = checkCanceled(err) @@ -1029,22 +1066,28 @@ func (c *grpcStorageClient) OpenWriter(params *openWriterParams, opts ...storage } } - o, off, finalized, err := gw.uploadBuffer(recvd, offset, doneReading) + o, off, err := gw.uploadBuffer(recvd, offset, doneReading) if err != nil { err = checkCanceled(err) errorf(err) pr.CloseWithError(err) return } - // At this point, the current buffer has been uploaded. Capture the - // committed offset here in case the upload was not finalized and - // another chunk is to be uploaded. - offset = off - progress(offset) - - // When we are done reading data and the chunk has been finalized, - // we are done. - if doneReading && finalized { + + // At this point, the current buffer has been uploaded. For resumable + // uploads and chunkSize = 0, capture the committed offset here in case + // the upload was not finalized and another chunk is to be uploaded. Call + // the progress function for resumable uploads only. + if gw.upid != "" || gw.chunkSize == 0 { + offset = off + } + if gw.upid != "" { + progress(offset) + } + + // When we are done reading data without errors, set the object and + // finish. + if doneReading { // Build Object from server's response. setObj(newObjectFromProto(o)) return @@ -1067,11 +1110,11 @@ func (c *grpcStorageClient) GetIamPolicy(ctx context.Context, resource string, v }, } var rp *iampb.Policy - err := run(ctx, func() error { + err := run(ctx, func(ctx context.Context) error { var err error rp, err = c.raw.GetIamPolicy(ctx, req, s.gax...) return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) return rp, err } @@ -1085,10 +1128,10 @@ func (c *grpcStorageClient) SetIamPolicy(ctx context.Context, resource string, p Policy: policy, } - return run(ctx, func() error { + return run(ctx, func(ctx context.Context) error { _, err := c.raw.SetIamPolicy(ctx, req, s.gax...) return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) } func (c *grpcStorageClient) TestIamPermissions(ctx context.Context, resource string, permissions []string, opts ...storageOption) ([]string, error) { @@ -1099,11 +1142,11 @@ func (c *grpcStorageClient) TestIamPermissions(ctx context.Context, resource str Permissions: permissions, } var res *iampb.TestIamPermissionsResponse - err := run(ctx, func() error { + err := run(ctx, func(ctx context.Context) error { var err error res, err = c.raw.TestIamPermissions(ctx, req, s.gax...) return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) if err != nil { return nil, err } @@ -1122,11 +1165,11 @@ func (c *grpcStorageClient) GetHMACKey(ctx context.Context, project, accessID st ctx = setUserProjectMetadata(ctx, s.userProject) } var metadata *storagepb.HmacKeyMetadata - err := run(ctx, func() error { + err := run(ctx, func(ctx context.Context) error { var err error metadata, err = c.raw.GetHmacKey(ctx, req, s.gax...) return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) if err != nil { return nil, err } @@ -1148,13 +1191,13 @@ func (c *grpcStorageClient) ListHMACKeys(ctx context.Context, project, serviceAc projectID: project, retry: s.retry, } - gitr := c.raw.ListHmacKeys(it.ctx, req, s.gax...) 
fetch := func(pageSize int, pageToken string) (token string, err error) { var hmacKeys []*storagepb.HmacKeyMetadata - err = run(it.ctx, func() error { + err = run(it.ctx, func(ctx context.Context) error { + gitr := c.raw.ListHmacKeys(ctx, req, s.gax...) hmacKeys, token, err = gitr.InternalFetch(pageSize, pageToken) return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) if err != nil { return "", err } @@ -1201,11 +1244,11 @@ func (c *grpcStorageClient) UpdateHMACKey(ctx context.Context, project, serviceA ctx = setUserProjectMetadata(ctx, s.userProject) } var metadata *storagepb.HmacKeyMetadata - err := run(ctx, func() error { + err := run(ctx, func(ctx context.Context) error { var err error metadata, err = c.raw.UpdateHmacKey(ctx, req, s.gax...) return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) if err != nil { return nil, err } @@ -1222,11 +1265,11 @@ func (c *grpcStorageClient) CreateHMACKey(ctx context.Context, project, serviceA ctx = setUserProjectMetadata(ctx, s.userProject) } var res *storagepb.CreateHmacKeyResponse - err := run(ctx, func() error { + err := run(ctx, func(ctx context.Context) error { var err error res, err = c.raw.CreateHmacKey(ctx, req, s.gax...) return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) if err != nil { return nil, err } @@ -1245,9 +1288,9 @@ func (c *grpcStorageClient) DeleteHMACKey(ctx context.Context, project string, a if s.userProject != "" { ctx = setUserProjectMetadata(ctx, s.userProject) } - return run(ctx, func() error { + return run(ctx, func(ctx context.Context) error { return c.raw.DeleteHmacKey(ctx, req, s.gax...) - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) } // Notification methods. @@ -1264,7 +1307,7 @@ func (c *grpcStorageClient) ListNotifications(ctx context.Context, bucket string Parent: bucketResourceName(globalProjectAlias, bucket), } var notifications []*storagepb.NotificationConfig - err = run(ctx, func() error { + err = run(ctx, func(ctx context.Context) error { gitr := c.raw.ListNotificationConfigs(ctx, req, s.gax...) for { // PageSize is not set and fallbacks to the API default pageSize of 100. @@ -1279,7 +1322,7 @@ func (c *grpcStorageClient) ListNotifications(ctx context.Context, bucket string } req.PageToken = nextPageToken } - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) if err != nil { return nil, err } @@ -1297,11 +1340,11 @@ func (c *grpcStorageClient) CreateNotification(ctx context.Context, bucket strin NotificationConfig: toProtoNotification(n), } var pbn *storagepb.NotificationConfig - err = run(ctx, func() error { + err = run(ctx, func(ctx context.Context) error { var err error pbn, err = c.raw.CreateNotificationConfig(ctx, req, s.gax...) return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) if err != nil { return nil, err } @@ -1314,9 +1357,9 @@ func (c *grpcStorageClient) DeleteNotification(ctx context.Context, bucket strin s := callSettings(c.settings, opts...) req := &storagepb.DeleteNotificationConfigRequest{Name: id} - return run(ctx, func() error { + return run(ctx, func(ctx context.Context) error { return c.raw.DeleteNotificationConfig(ctx, req, s.gax...) 
- }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) } // setUserProjectMetadata appends a project ID to the outgoing Context metadata @@ -1335,6 +1378,7 @@ type readStreamResponse struct { type gRPCReader struct { seen, size int64 + zeroRange bool stream storagepb.Storage_ReadObjectClient reopen func(seen int64) (*readStreamResponse, context.CancelFunc, error) leftovers []byte @@ -1344,7 +1388,12 @@ type gRPCReader struct { // Read reads bytes into the user's buffer from an open gRPC stream. func (r *gRPCReader) Read(p []byte) (int, error) { - // No stream to read from, either never initiliazed or Close was called. + // The entire object has been read by this reader, return EOF. + if r.size == r.seen || r.zeroRange { + return 0, io.EOF + } + + // No stream to read from, either never initialized or Close was called. // Note: There is a potential concurrency issue if multiple routines are // using the same reader. One encounters an error and the stream is closed // and then reopened while the other routine attempts to read from it. @@ -1352,11 +1401,6 @@ func (r *gRPCReader) Read(p []byte) (int, error) { return 0, fmt.Errorf("reader has been closed") } - // The entire object has been read by this reader, return EOF. - if r.size != 0 && r.size == r.seen { - return 0, io.EOF - } - var n int // Read leftovers and return what was available to conform to the Reader // interface: https://pkg.go.dev/io#Reader. @@ -1447,14 +1491,17 @@ func (r *gRPCReader) reopenStream() (*storagepb.ReadObjectResponse, error) { func newGRPCWriter(c *grpcStorageClient, params *openWriterParams, r io.Reader) *gRPCWriter { size := params.chunkSize + + // Round up chunksize to nearest 256KiB + if size%googleapi.MinUploadChunkSize != 0 { + size += googleapi.MinUploadChunkSize - (size % googleapi.MinUploadChunkSize) + } + + // A completely bufferless upload is not possible as it is in JSON because + // the buffer must be provided to the message. However use the minimum size + // possible in this case. if params.chunkSize == 0 { - // TODO: Should we actually use the minimum of 256 KB here when the user - // indicates they want minimal memory usage? We cannot do a zero-copy, - // bufferless upload like HTTP/JSON can. - // TODO: We need to determine if we can avoid starting a - // resumable upload when the user *plans* to send more than bufSize but - // with a bufferless upload. - size = maxPerMessageWriteSize + size = googleapi.MinUploadChunkSize } return &gRPCWriter{ @@ -1467,6 +1514,7 @@ func newGRPCWriter(c *grpcStorageClient, params *openWriterParams, r io.Reader) conds: params.conds, encryptionKey: params.encryptionKey, sendCRC32C: params.sendCRC32C, + chunkSize: params.chunkSize, } } @@ -1486,9 +1534,10 @@ type gRPCWriter struct { settings *settings sendCRC32C bool + chunkSize int // The gRPC client-stream used for sending buffers. - stream storagepb.Storage_WriteObjectClient + stream storagepb.Storage_BidiWriteObjectClient // The Resumable Upload ID started by a gRPC-based Writer. upid string @@ -1509,89 +1558,101 @@ func (w *gRPCWriter) startResumableUpload() error { // the upload, but in the future, we must also support sending it // on the *last* message of the stream. 
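newGRPCWriter above now rounds the requested chunk size up to the nearest 256 KiB and substitutes the minimum when the caller asked for zero. The same arithmetic, pulled into a standalone helper purely for illustration:

    import "google.golang.org/api/googleapi"

    // roundUpChunkSize mirrors the rounding in newGRPCWriter: sizes are rounded up
    // to a multiple of googleapi.MinUploadChunkSize (256 KiB), and zero maps to the
    // minimum because gRPC cannot do a fully bufferless upload.
    func roundUpChunkSize(size int) int {
        if size == 0 {
            return googleapi.MinUploadChunkSize
        }
        if rem := size % googleapi.MinUploadChunkSize; rem != 0 {
            size += googleapi.MinUploadChunkSize - rem
        }
        return size
    }
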
req.ObjectChecksums = toProtoChecksums(w.sendCRC32C, w.attrs) - return run(w.ctx, func() error { + return run(w.ctx, func(ctx context.Context) error { upres, err := w.c.raw.StartResumableWrite(w.ctx, req) w.upid = upres.GetUploadId() return err - }, w.settings.retry, w.settings.idempotent, setRetryHeaderGRPC(w.ctx)) + }, w.settings.retry, w.settings.idempotent) } // queryProgress is a helper that queries the status of the resumable upload // associated with the given upload ID. func (w *gRPCWriter) queryProgress() (int64, error) { var persistedSize int64 - err := run(w.ctx, func() error { + err := run(w.ctx, func(ctx context.Context) error { q, err := w.c.raw.QueryWriteStatus(w.ctx, &storagepb.QueryWriteStatusRequest{ UploadId: w.upid, }) persistedSize = q.GetPersistedSize() return err - }, w.settings.retry, true, setRetryHeaderGRPC(w.ctx)) + }, w.settings.retry, true) // q.GetCommittedSize() will return 0 if q is nil. return persistedSize, err } -// uploadBuffer opens a Write stream and uploads the buffer at the given offset (if -// uploading a chunk for a resumable uploadBuffer), and will mark the write as -// finished if we are done receiving data from the user. The resulting write -// offset after uploading the buffer is returned, as well as a boolean -// indicating if the Object has been finalized. If it has been finalized, the -// final Object will be returned as well. Finalizing the upload is primarily -// important for Resumable Uploads. A simple or multi-part upload will always -// be finalized once the entire buffer has been written. -func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*storagepb.Object, int64, bool, error) { - var err error - var finishWrite bool - var sent, limit int = 0, maxPerMessageWriteSize +// uploadBuffer uploads the buffer at the given offset using a bi-directional +// Write stream. It will open a new stream if necessary (on the first call or +// after resuming from failure). The resulting write offset after uploading the +// buffer is returned, as well as well as the final Object if the upload is +// completed. +// +// Returns object, persisted size, and any error that is not retriable. +func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*storagepb.Object, int64, error) { var shouldRetry = ShouldRetry if w.settings.retry != nil && w.settings.retry.shouldRetry != nil { shouldRetry = w.settings.retry.shouldRetry } - offset := start + + var err error + var lastWriteOfEntireObject bool + + sent := 0 + writeOffset := start + toWrite := w.buf[:recvd] + + // Send a request with as many bytes as possible. + // Loop until all bytes are sent. for { - first := sent == 0 - // This indicates that this is the last message and the remaining - // data fits in one message. - belowLimit := recvd-sent <= limit - if belowLimit { - limit = recvd - sent + bytesNotYetSent := recvd - sent + remainingDataFitsInSingleReq := bytesNotYetSent <= maxPerMessageWriteSize + + if remainingDataFitsInSingleReq && doneReading { + lastWriteOfEntireObject = true } - if belowLimit && doneReading { - finishWrite = true + + // Send the maximum amount of bytes we can, unless we don't have that many. + bytesToSendInCurrReq := maxPerMessageWriteSize + if remainingDataFitsInSingleReq { + bytesToSendInCurrReq = bytesNotYetSent } // Prepare chunk section for upload. 
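The rewritten uploadBuffer loop that follows slices the buffer into requests of at most maxPerMessageWriteSize bytes, sets Flush and StateLookup on the last request for the buffer, and sets FinishWrite only when that request is also the last one for the whole object. A simplified, self-contained sketch of that chunking arithmetic (the message struct and names are invented for illustration; the empty-buffer case is omitted for brevity):

    // message describes one outgoing write request derived from the local buffer.
    type message struct {
        offset      int64
        length      int
        finishWrite bool // last request of the entire object
        flush       bool // last request for this buffer: persist and look up state
    }

    // splitBuffer splits recvd buffered bytes starting at offset start into
    // per-request descriptions of at most maxPerMsg bytes each.
    func splitBuffer(recvd int, start int64, doneReading bool, maxPerMsg int) []message {
        var msgs []message
        sent := 0
        offset := start
        for sent < recvd {
            remaining := recvd - sent
            last := remaining <= maxPerMsg
            n := maxPerMsg
            if last {
                n = remaining
            }
            msgs = append(msgs, message{
                offset:      offset,
                length:      n,
                finishWrite: last && doneReading,
                flush:       last,
            })
            sent += n
            offset += int64(n)
        }
        return msgs
    }
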
- data := toWrite[sent : sent+limit] - req := &storagepb.WriteObjectRequest{ - Data: &storagepb.WriteObjectRequest_ChecksummedData{ + data := toWrite[sent : sent+bytesToSendInCurrReq] + + req := &storagepb.BidiWriteObjectRequest{ + Data: &storagepb.BidiWriteObjectRequest_ChecksummedData{ ChecksummedData: &storagepb.ChecksummedData{ Content: data, }, }, - WriteOffset: offset, - FinishWrite: finishWrite, + WriteOffset: writeOffset, + FinishWrite: lastWriteOfEntireObject, + Flush: remainingDataFitsInSingleReq, + StateLookup: remainingDataFitsInSingleReq, } - // Open a new stream and set the first_message field on the request. - // The first message on the WriteObject stream must either be the - // Object or the Resumable Upload ID. - if first { - ctx := gapic.InsertMetadata(w.ctx, metadata.Pairs("x-goog-request-params", "bucket="+url.QueryEscape(w.bucket))) - w.stream, err = w.c.raw.WriteObject(ctx) + // Open a new stream if necessary and set the first_message field on + // the request. The first message on the WriteObject stream must either + // be the Object or the Resumable Upload ID. + if w.stream == nil { + hds := []string{"x-goog-request-params", fmt.Sprintf("bucket=projects/_/buckets/%s", url.QueryEscape(w.bucket))} + ctx := gax.InsertMetadataIntoOutgoingContext(w.ctx, hds...) + + w.stream, err = w.c.raw.BidiWriteObject(ctx) if err != nil { - return nil, 0, false, err + return nil, 0, err } - if w.upid != "" { - req.FirstMessage = &storagepb.WriteObjectRequest_UploadId{UploadId: w.upid} - } else { + if w.upid != "" { // resumable upload + req.FirstMessage = &storagepb.BidiWriteObjectRequest_UploadId{UploadId: w.upid} + } else { // non-resumable spec, err := w.writeObjectSpec() if err != nil { - return nil, 0, false, err + return nil, 0, err } - req.FirstMessage = &storagepb.WriteObjectRequest_WriteObjectSpec{ + req.FirstMessage = &storagepb.BidiWriteObjectRequest_WriteObjectSpec{ WriteObjectSpec: spec, } req.CommonObjectRequestParams = toProtoCommonObjectRequestParams(w.encryptionKey) @@ -1601,38 +1662,53 @@ func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*st // on the *last* message of the stream (instead of the first). req.ObjectChecksums = toProtoChecksums(w.sendCRC32C, w.attrs) } - } err = w.stream.Send(req) if err == io.EOF { // err was io.EOF. The client-side of a stream only gets an EOF on Send // when the backend closes the stream and wants to return an error - // status. Closing the stream receives the status as an error. - _, err = w.stream.CloseAndRecv() + // status. + + // Receive from the stream Recv() until it returns a non-nil error + // to receive the server's status as an error. We may get multiple + // messages before the error due to buffering. + err = nil + for err == nil { + _, err = w.stream.Recv() + } + // Drop the stream reference as a new one will need to be created if + // we retry. + w.stream = nil + + // Drop the stream reference as a new one will need to be created if + // we can retry the upload + w.stream = nil // Retriable errors mean we should start over and attempt to // resend the entire buffer via a new stream. - // If not retriable, falling through will return the error received - // from closing the stream. + // If not retriable, falling through will return the error received. if shouldRetry(err) { - sent = 0 - finishWrite = false // TODO: Add test case for failure modes of querying progress. 
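When a send fails with a retryable error, the loop above re-derives its position from the server rather than restarting at zero: determineOffset queries the persisted size, and the sent counter is recomputed relative to the start of the current buffer. That arithmetic, as a hedged standalone helper (the name and error text are invented):

    import "fmt"

    // resumePosition returns the next write offset and how many bytes of the local
    // buffer are already persisted, given the buffer's starting offset and the
    // persisted size reported by the server.
    func resumePosition(start, persisted int64, recvd int) (writeOffset int64, sent int, err error) {
        if persisted < start || persisted > start+int64(recvd) {
            return 0, 0, fmt.Errorf("persisted size %d outside buffer window [%d, %d]", persisted, start, start+int64(recvd))
        }
        return persisted, int(persisted - start), nil
    }
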
- offset, err = w.determineOffset(start) - if err == nil { - continue + writeOffset, err = w.determineOffset(start) + if err != nil { + return nil, 0, err } + sent = int(writeOffset) - int(start) + + // Continue sending requests, opening a new stream and resending + // any bytes not yet persisted as per QueryWriteStatus + continue } } if err != nil { - return nil, 0, false, err + return nil, 0, err } // Update the immediate stream's sent total and the upload offset with // the data sent. sent += len(data) - offset += int64(len(data)) + writeOffset += int64(len(data)) // Not done sending data, do not attempt to commit it yet, loop around // and send more data. @@ -1640,25 +1716,82 @@ func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*st continue } - // Done sending data. Close the stream to "commit" the data sent. - resp, finalized, err := w.commit() + // The buffer has been uploaded and there is still more data to be + // uploaded, but this is not a resumable upload session. Therefore, + // don't check persisted data. + if !lastWriteOfEntireObject && w.chunkSize == 0 { + return nil, writeOffset, nil + } + + // Done sending data (remainingDataFitsInSingleReq should == true if we + // reach this code). Receive from the stream to confirm the persisted data. + resp, err := w.stream.Recv() + // Retriable errors mean we should start over and attempt to // resend the entire buffer via a new stream. // If not retriable, falling through will return the error received // from closing the stream. if shouldRetry(err) { - sent = 0 - finishWrite = false - offset, err = w.determineOffset(start) - if err == nil { - continue + writeOffset, err = w.determineOffset(start) + if err != nil { + return nil, 0, err } + sent = int(writeOffset) - int(start) + + // Drop the stream reference as a new one will need to be created. + w.stream = nil + + continue } if err != nil { - return nil, 0, false, err + return nil, 0, err } - return resp.GetResource(), offset, finalized, nil + // Confirm the persisted data if we have not finished uploading the object. + if !lastWriteOfEntireObject { + if resp.GetPersistedSize() != writeOffset { + // Retry if not all bytes were persisted. + writeOffset = resp.GetPersistedSize() + sent = int(writeOffset) - int(start) + continue + } + } else { + // If the object is done uploading, close the send stream to signal + // to the server that we are done sending so that we can receive + // from the stream without blocking. + err = w.stream.CloseSend() + if err != nil { + // CloseSend() retries the send internally. It never returns an + // error in the current implementation, but we check it anyway in + // case that it does in the future. + return nil, 0, err + } + + // Stream receives do not block once send is closed, but we may not + // receive the response with the object right away; loop until we + // receive the object or error out. + var obj *storagepb.Object + for obj == nil { + resp, err := w.stream.Recv() + if err != nil { + return nil, 0, err + } + + obj = resp.GetResource() + } + + // Even though we received the object response, continue reading + // until we receive a non-nil error, to ensure the stream does not + // leak even if the context isn't cancelled. 
See: + // https://pkg.go.dev/google.golang.org/grpc#ClientConn.NewStream + for err == nil { + _, err = w.stream.Recv() + } + + return obj, writeOffset, nil + } + + return nil, writeOffset, nil } } @@ -1678,26 +1811,6 @@ func (w *gRPCWriter) determineOffset(offset int64) (int64, error) { return offset, nil } -// commit closes the stream to commit the data sent and potentially receive -// the finalized object if finished uploading. If the last request sent -// indicated that writing was finished, the Object will be finalized and -// returned. If not, then the Object will be nil, and the boolean returned will -// be false. -func (w *gRPCWriter) commit() (*storagepb.WriteObjectResponse, bool, error) { - finalized := true - resp, err := w.stream.CloseAndRecv() - if err == io.EOF { - // Closing a stream for a resumable upload finish_write = false results - // in an EOF which can be ignored, as we aren't done uploading yet. - finalized = false - err = nil - } - // Drop the stream reference as it has been closed. - w.stream = nil - - return resp, finalized, err -} - // writeObjectSpec constructs a WriteObjectSpec proto using the Writer's // ObjectAttrs and applies its Conditions. This is only used for gRPC. func (w *gRPCWriter) writeObjectSpec() (*storagepb.WriteObjectSpec, error) { diff --git a/vendor/cloud.google.com/go/storage/hmac.go b/vendor/cloud.google.com/go/storage/hmac.go index 422a7c2335bdc..1b9fbe9dd2021 100644 --- a/vendor/cloud.google.com/go/storage/hmac.go +++ b/vendor/cloud.google.com/go/storage/hmac.go @@ -20,14 +20,12 @@ import ( "fmt" "time" - storagepb "cloud.google.com/go/storage/internal/apiv2/stubs" + "cloud.google.com/go/storage/internal/apiv2/storagepb" "google.golang.org/api/iterator" raw "google.golang.org/api/storage/v1" ) // HMACState is the state of the HMAC key. -// -// This type is EXPERIMENTAL and subject to change or removal without notice. type HMACState string const ( @@ -50,8 +48,6 @@ const ( // // HMAC keys are used to authenticate signed access to objects. To enable HMAC key // authentication, please visit https://cloud.google.com/storage/docs/migrating. -// -// This type is EXPERIMENTAL and subject to change or removal without notice. type HMACKey struct { // The HMAC's secret key. Secret string @@ -85,8 +81,6 @@ type HMACKey struct { } // HMACKeyHandle helps provide access and management for HMAC keys. -// -// This type is EXPERIMENTAL and subject to change or removal without notice. type HMACKeyHandle struct { projectID string accessID string @@ -95,8 +89,6 @@ type HMACKeyHandle struct { } // HMACKeyHandle creates a handle that will be used for HMACKey operations. -// -// This method is EXPERIMENTAL and subject to change or removal without notice. func (c *Client) HMACKeyHandle(projectID, accessID string) *HMACKeyHandle { return &HMACKeyHandle{ projectID: projectID, @@ -111,8 +103,6 @@ func (c *Client) HMACKeyHandle(projectID, accessID string) *HMACKeyHandle { // // Options such as UserProjectForHMACKeys can be used to set the // userProject to be billed against for operations. -// -// This method is EXPERIMENTAL and subject to change or removal without notice. func (hkh *HMACKeyHandle) Get(ctx context.Context, opts ...HMACKeyOption) (*HMACKey, error) { desc := new(hmacKeyDesc) for _, opt := range opts { @@ -128,8 +118,6 @@ func (hkh *HMACKeyHandle) Get(ctx context.Context, opts ...HMACKeyOption) (*HMAC // Delete invokes an RPC to delete the key referenced by accessID, on Google Cloud Storage. // Only inactive HMAC keys can be deleted. 
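As the comment above notes, only inactive HMAC keys can be deleted, so deletion is a two-step operation through the public handle API. A short sketch (project and access IDs are placeholders):

    import (
        "context"

        "cloud.google.com/go/storage"
    )

    // deleteHMACKey deactivates a key and then deletes it.
    func deleteHMACKey(ctx context.Context, client *storage.Client, projectID, accessID string) error {
        h := client.HMACKeyHandle(projectID, accessID)
        if _, err := h.Update(ctx, storage.HMACKeyAttrsToUpdate{State: storage.Inactive}); err != nil {
            return err
        }
        return h.Delete(ctx)
    }
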
// After deletion, a key cannot be used to authenticate requests. -// -// This method is EXPERIMENTAL and subject to change or removal without notice. func (hkh *HMACKeyHandle) Delete(ctx context.Context, opts ...HMACKeyOption) error { desc := new(hmacKeyDesc) for _, opt := range opts { @@ -187,8 +175,6 @@ func toHMACKeyFromProto(pbmd *storagepb.HmacKeyMetadata) *HMACKey { } // CreateHMACKey invokes an RPC for Google Cloud Storage to create a new HMACKey. -// -// This method is EXPERIMENTAL and subject to change or removal without notice. func (c *Client) CreateHMACKey(ctx context.Context, projectID, serviceAccountEmail string, opts ...HMACKeyOption) (*HMACKey, error) { if projectID == "" { return nil, errors.New("storage: expecting a non-blank projectID") @@ -208,8 +194,6 @@ func (c *Client) CreateHMACKey(ctx context.Context, projectID, serviceAccountEma } // HMACKeyAttrsToUpdate defines the attributes of an HMACKey that will be updated. -// -// This type is EXPERIMENTAL and subject to change or removal without notice. type HMACKeyAttrsToUpdate struct { // State is required and must be either StateActive or StateInactive. State HMACState @@ -219,8 +203,6 @@ type HMACKeyAttrsToUpdate struct { } // Update mutates the HMACKey referred to by accessID. -// -// This method is EXPERIMENTAL and subject to change or removal without notice. func (h *HMACKeyHandle) Update(ctx context.Context, au HMACKeyAttrsToUpdate, opts ...HMACKeyOption) (*HMACKey, error) { if au.State != Active && au.State != Inactive { return nil, fmt.Errorf("storage: invalid state %q for update, must be either %q or %q", au.State, Active, Inactive) @@ -240,8 +222,6 @@ func (h *HMACKeyHandle) Update(ctx context.Context, au HMACKeyAttrsToUpdate, opt // An HMACKeysIterator is an iterator over HMACKeys. // // Note: This iterator is not safe for concurrent operations without explicit synchronization. -// -// This type is EXPERIMENTAL and subject to change or removal without notice. type HMACKeysIterator struct { ctx context.Context raw *raw.ProjectsHmacKeysService @@ -257,8 +237,6 @@ type HMACKeysIterator struct { // ListHMACKeys returns an iterator for listing HMACKeys. // // Note: This iterator is not safe for concurrent operations without explicit synchronization. -// -// This method is EXPERIMENTAL and subject to change or removal without notice. func (c *Client) ListHMACKeys(ctx context.Context, projectID string, opts ...HMACKeyOption) *HMACKeysIterator { desc := new(hmacKeyDesc) for _, opt := range opts { @@ -274,8 +252,6 @@ func (c *Client) ListHMACKeys(ctx context.Context, projectID string, opts ...HMA // calls will return iterator.Done. // // Note: This iterator is not safe for concurrent operations without explicit synchronization. -// -// This method is EXPERIMENTAL and subject to change or removal without notice. func (it *HMACKeysIterator) Next() (*HMACKey, error) { if err := it.nextFunc(); err != nil { return nil, err @@ -290,8 +266,6 @@ func (it *HMACKeysIterator) Next() (*HMACKey, error) { // PageInfo supports pagination. See the google.golang.org/api/iterator package for details. // // Note: This iterator is not safe for concurrent operations without explicit synchronization. -// -// This method is EXPERIMENTAL and subject to change or removal without notice. 
func (it *HMACKeysIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } func (it *HMACKeysIterator) fetch(pageSize int, pageToken string) (token string, err error) { @@ -315,12 +289,11 @@ func (it *HMACKeysIterator) fetch(pageSize int, pageToken string) (token string, call = call.MaxResults(int64(pageSize)) } - ctx := it.ctx var resp *raw.HmacKeysMetadata - err = run(it.ctx, func() error { + err = run(it.ctx, func(ctx context.Context) error { resp, err = call.Context(ctx).Do() return err - }, it.retry, true, setRetryHeaderHTTP(call)) + }, it.retry, true) if err != nil { return "", err } @@ -345,8 +318,6 @@ type hmacKeyDesc struct { } // HMACKeyOption configures the behavior of HMACKey related methods and actions. -// -// This interface is EXPERIMENTAL and subject to change or removal without notice. type HMACKeyOption interface { withHMACKeyDesc(*hmacKeyDesc) } @@ -362,8 +333,6 @@ func (hkdf hmacKeyDescFunc) withHMACKeyDesc(hkd *hmacKeyDesc) { // // Only one service account email can be used as a filter, so if multiple // of these options are applied, the last email to be set will be used. -// -// This option is EXPERIMENTAL and subject to change or removal without notice. func ForHMACKeyServiceAccountEmail(serviceAccountEmail string) HMACKeyOption { return hmacKeyDescFunc(func(hkd *hmacKeyDesc) { hkd.forServiceAccountEmail = serviceAccountEmail @@ -371,8 +340,6 @@ func ForHMACKeyServiceAccountEmail(serviceAccountEmail string) HMACKeyOption { } // ShowDeletedHMACKeys will also list keys whose state is "DELETED". -// -// This option is EXPERIMENTAL and subject to change or removal without notice. func ShowDeletedHMACKeys() HMACKeyOption { return hmacKeyDescFunc(func(hkd *hmacKeyDesc) { hkd.showDeletedKeys = true @@ -383,8 +350,6 @@ func ShowDeletedHMACKeys() HMACKeyOption { // if userProjectID is non-empty. // // Note: This is a noop right now and only provided for API compatibility. -// -// This option is EXPERIMENTAL and subject to change or removal without notice. func UserProjectForHMACKeys(userProjectID string) HMACKeyOption { return hmacKeyDescFunc(func(hkd *hmacKeyDesc) { hkd.userProjectID = userProjectID diff --git a/vendor/cloud.google.com/go/storage/http_client.go b/vendor/cloud.google.com/go/storage/http_client.go index 2d0cbe117ec32..b62f009da1d23 100644 --- a/vendor/cloud.google.com/go/storage/http_client.go +++ b/vendor/cloud.google.com/go/storage/http_client.go @@ -32,6 +32,7 @@ import ( "cloud.google.com/go/iam/apiv1/iampb" "cloud.google.com/go/internal/optional" "cloud.google.com/go/internal/trace" + "github.com/googleapis/gax-go/v2/callctx" "golang.org/x/oauth2/google" "google.golang.org/api/googleapi" "google.golang.org/api/iterator" @@ -44,12 +45,10 @@ import ( // httpStorageClient is the HTTP-JSON API implementation of the transport-agnostic // storageClient interface. -// -// This is an experimental API and not intended for public use. type httpStorageClient struct { creds *google.Credentials hc *http.Client - readHost string + xmlHost string raw *raw.Service scheme string settings *settings @@ -58,8 +57,6 @@ type httpStorageClient struct { // newHTTPStorageClient initializes a new storageClient that uses the HTTP-JSON // Storage API. -// -// This is an experimental API and not intended for public use. func newHTTPStorageClient(ctx context.Context, opts ...storageOption) (storageClient, error) { s := initSettings(opts...) 
o := s.clientOption @@ -123,7 +120,7 @@ func newHTTPStorageClient(ctx context.Context, opts ...storageOption) (storageCl if err != nil { return nil, fmt.Errorf("storage client: %w", err) } - // Update readHost and scheme with the chosen endpoint. + // Update xmlHost and scheme with the chosen endpoint. u, err := url.Parse(ep) if err != nil { return nil, fmt.Errorf("supplied endpoint %q is not valid: %w", ep, err) @@ -132,7 +129,7 @@ func newHTTPStorageClient(ctx context.Context, opts ...storageOption) (storageCl return &httpStorageClient{ creds: creds, hc: hc, - readHost: u.Host, + xmlHost: u.Host, raw: rawService, scheme: u.Scheme, settings: s, @@ -151,11 +148,11 @@ func (c *httpStorageClient) GetServiceAccount(ctx context.Context, project strin s := callSettings(c.settings, opts...) call := c.raw.Projects.ServiceAccount.Get(project) var res *raw.ServiceAccount - err := run(ctx, func() error { + err := run(ctx, func(ctx context.Context) error { var err error res, err = call.Context(ctx).Do() return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(call)) + }, s.retry, s.idempotent) if err != nil { return "", err } @@ -185,14 +182,14 @@ func (c *httpStorageClient) CreateBucket(ctx context.Context, project, bucket st req.PredefinedDefaultObjectAcl(attrs.PredefinedDefaultObjectACL) } var battrs *BucketAttrs - err := run(ctx, func() error { + err := run(ctx, func(ctx context.Context) error { b, err := req.Context(ctx).Do() if err != nil { return err } battrs, err = newBucket(b) return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + }, s.retry, s.idempotent) return battrs, err } @@ -213,10 +210,10 @@ func (c *httpStorageClient) ListBuckets(ctx context.Context, project string, opt req.MaxResults(int64(pageSize)) } var resp *raw.Buckets - err = run(it.ctx, func() error { - resp, err = req.Context(it.ctx).Do() + err = run(it.ctx, func(ctx context.Context) error { + resp, err = req.Context(ctx).Do() return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + }, s.retry, s.idempotent) if err != nil { return "", err } @@ -251,7 +248,7 @@ func (c *httpStorageClient) DeleteBucket(ctx context.Context, bucket string, con req.UserProject(s.userProject) } - return run(ctx, func() error { return req.Context(ctx).Do() }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + return run(ctx, func(ctx context.Context) error { return req.Context(ctx).Do() }, s.retry, s.idempotent) } func (c *httpStorageClient) GetBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error) { @@ -267,10 +264,10 @@ func (c *httpStorageClient) GetBucket(ctx context.Context, bucket string, conds } var resp *raw.Bucket - err = run(ctx, func() error { + err = run(ctx, func(ctx context.Context) error { resp, err = req.Context(ctx).Do() return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + }, s.retry, s.idempotent) var e *googleapi.Error if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound { @@ -301,10 +298,10 @@ func (c *httpStorageClient) UpdateBucket(ctx context.Context, bucket string, uat } var rawBucket *raw.Bucket - err = run(ctx, func() error { + err = run(ctx, func(ctx context.Context) error { rawBucket, err = req.Context(ctx).Do() return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + }, s.retry, s.idempotent) if err != nil { return nil, err } @@ -320,10 +317,10 @@ func (c *httpStorageClient) LockBucketRetentionPolicy(ctx context.Context, bucke } req := c.raw.Buckets.LockRetentionPolicy(bucket, metageneration) - return run(ctx, 
func() error { + return run(ctx, func(ctx context.Context) error { _, err := req.Context(ctx).Do() return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + }, s.retry, s.idempotent) } func (c *httpStorageClient) ListObjects(ctx context.Context, bucket string, q *Query, opts ...storageOption) *ObjectIterator { s := callSettings(c.settings, opts...) @@ -347,6 +344,7 @@ func (c *httpStorageClient) ListObjects(ctx context.Context, bucket string, q *Q req.EndOffset(it.query.EndOffset) req.Versions(it.query.Versions) req.IncludeTrailingDelimiter(it.query.IncludeTrailingDelimiter) + req.MatchGlob(it.query.MatchGlob) if selection := it.query.toFieldSelection(); selection != "" { req.Fields("nextPageToken", googleapi.Field(selection)) } @@ -359,10 +357,10 @@ func (c *httpStorageClient) ListObjects(ctx context.Context, bucket string, q *Q } var resp *raw.Objects var err error - err = run(it.ctx, func() error { - resp, err = req.Context(it.ctx).Do() + err = run(it.ctx, func(ctx context.Context) error { + resp, err = req.Context(ctx).Do() return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + }, s.retry, s.idempotent) if err != nil { var e *googleapi.Error if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound { @@ -397,7 +395,7 @@ func (c *httpStorageClient) DeleteObject(ctx context.Context, bucket, object str if s.userProject != "" { req.UserProject(s.userProject) } - err := run(ctx, func() error { return req.Context(ctx).Do() }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + err := run(ctx, func(ctx context.Context) error { return req.Context(ctx).Do() }, s.retry, s.idempotent) var e *googleapi.Error if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound { return ErrObjectNotExist @@ -419,10 +417,10 @@ func (c *httpStorageClient) GetObject(ctx context.Context, bucket, object string } var obj *raw.Object var err error - err = run(ctx, func() error { + err = run(ctx, func(ctx context.Context) error { obj, err = req.Context(ctx).Do() return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + }, s.retry, s.idempotent) var e *googleapi.Error if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound { return nil, ErrObjectNotExist @@ -501,7 +499,7 @@ func (c *httpStorageClient) UpdateObject(ctx context.Context, bucket, object str rawObj := attrs.toRawObject(bucket) rawObj.ForceSendFields = forceSendFields rawObj.NullFields = nullFields - call := c.raw.Objects.Patch(bucket, object, rawObj).Projection("full").Context(ctx) + call := c.raw.Objects.Patch(bucket, object, rawObj).Projection("full") if err := applyConds("Update", gen, conds, call); err != nil { return nil, err } @@ -516,7 +514,7 @@ func (c *httpStorageClient) UpdateObject(ctx context.Context, bucket, object str } var obj *raw.Object var err error - err = run(ctx, func() error { obj, err = call.Do(); return err }, s.retry, s.idempotent, setRetryHeaderHTTP(call)) + err = run(ctx, func(ctx context.Context) error { obj, err = call.Context(ctx).Do(); return err }, s.retry, s.idempotent) var e *googleapi.Error if errors.As(err, &e) && e.Code == http.StatusNotFound { return nil, ErrObjectNotExist @@ -533,7 +531,7 @@ func (c *httpStorageClient) DeleteDefaultObjectACL(ctx context.Context, bucket s s := callSettings(c.settings, opts...) 
req := c.raw.DefaultObjectAccessControls.Delete(bucket, string(entity)) configureACLCall(ctx, s.userProject, req) - return run(ctx, func() error { return req.Context(ctx).Do() }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + return run(ctx, func(ctx context.Context) error { return req.Context(ctx).Do() }, s.retry, s.idempotent) } func (c *httpStorageClient) ListDefaultObjectACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error) { @@ -542,10 +540,10 @@ func (c *httpStorageClient) ListDefaultObjectACLs(ctx context.Context, bucket st var err error req := c.raw.DefaultObjectAccessControls.List(bucket) configureACLCall(ctx, s.userProject, req) - err = run(ctx, func() error { - acls, err = req.Do() + err = run(ctx, func(ctx context.Context) error { + acls, err = req.Context(ctx).Do() return err - }, s.retry, true, setRetryHeaderHTTP(req)) + }, s.retry, true) if err != nil { return nil, err } @@ -562,14 +560,13 @@ func (c *httpStorageClient) UpdateDefaultObjectACL(ctx context.Context, bucket s Entity: string(entity), Role: string(role), } - var req setRequest var err error - req = c.raw.DefaultObjectAccessControls.Update(bucket, string(entity), acl) + req := c.raw.DefaultObjectAccessControls.Update(bucket, string(entity), acl) configureACLCall(ctx, s.userProject, req) - return run(ctx, func() error { - _, err = req.Do() + return run(ctx, func(ctx context.Context) error { + _, err = req.Context(ctx).Do() return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + }, s.retry, s.idempotent) } // Bucket ACL methods. @@ -578,7 +575,7 @@ func (c *httpStorageClient) DeleteBucketACL(ctx context.Context, bucket string, s := callSettings(c.settings, opts...) req := c.raw.BucketAccessControls.Delete(bucket, string(entity)) configureACLCall(ctx, s.userProject, req) - return run(ctx, func() error { return req.Context(ctx).Do() }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + return run(ctx, func(ctx context.Context) error { return req.Context(ctx).Do() }, s.retry, s.idempotent) } func (c *httpStorageClient) ListBucketACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error) { @@ -587,10 +584,10 @@ func (c *httpStorageClient) ListBucketACLs(ctx context.Context, bucket string, o var err error req := c.raw.BucketAccessControls.List(bucket) configureACLCall(ctx, s.userProject, req) - err = run(ctx, func() error { - acls, err = req.Do() + err = run(ctx, func(ctx context.Context) error { + acls, err = req.Context(ctx).Do() return err - }, s.retry, true, setRetryHeaderHTTP(req)) + }, s.retry, true) if err != nil { return nil, err } @@ -607,10 +604,10 @@ func (c *httpStorageClient) UpdateBucketACL(ctx context.Context, bucket string, req := c.raw.BucketAccessControls.Update(bucket, string(entity), acl) configureACLCall(ctx, s.userProject, req) var err error - return run(ctx, func() error { - _, err = req.Do() + return run(ctx, func(ctx context.Context) error { + _, err = req.Context(ctx).Do() return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + }, s.retry, s.idempotent) } // configureACLCall sets the context, user project and headers on the apiary library call. @@ -630,7 +627,7 @@ func (c *httpStorageClient) DeleteObjectACL(ctx context.Context, bucket, object s := callSettings(c.settings, opts...) 
req := c.raw.ObjectAccessControls.Delete(bucket, object, string(entity)) configureACLCall(ctx, s.userProject, req) - return run(ctx, func() error { return req.Context(ctx).Do() }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + return run(ctx, func(ctx context.Context) error { return req.Context(ctx).Do() }, s.retry, s.idempotent) } // ListObjectACLs retrieves object ACL entries. By default, it operates on the latest generation of this object. @@ -641,10 +638,10 @@ func (c *httpStorageClient) ListObjectACLs(ctx context.Context, bucket, object s var err error req := c.raw.ObjectAccessControls.List(bucket, object) configureACLCall(ctx, s.userProject, req) - err = run(ctx, func() error { - acls, err = req.Do() + err = run(ctx, func(ctx context.Context) error { + acls, err = req.Context(ctx).Do() return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + }, s.retry, s.idempotent) if err != nil { return nil, err } @@ -663,14 +660,13 @@ func (c *httpStorageClient) UpdateObjectACL(ctx context.Context, bucket, object Entity: string(entity), Role: string(role), } - var req setRequest var err error - req = c.raw.ObjectAccessControls.Update(bucket, object, string(entity), acl) + req := c.raw.ObjectAccessControls.Update(bucket, object, string(entity), acl) configureACLCall(ctx, s.userProject, req) - return run(ctx, func() error { - _, err = req.Do() + return run(ctx, func(ctx context.Context) error { + _, err = req.Context(ctx).Do() return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + }, s.retry, s.idempotent) } // Media operations. @@ -694,7 +690,7 @@ func (c *httpStorageClient) ComposeObject(ctx context.Context, req *composeObjec rawReq.SourceObjects = append(rawReq.SourceObjects, srcObj) } - call := c.raw.Objects.Compose(req.dstBucket, req.dstObject.name, rawReq).Context(ctx) + call := c.raw.Objects.Compose(req.dstBucket, req.dstObject.name, rawReq) if err := applyConds("ComposeFrom destination", defaultGen, req.dstObject.conds, call); err != nil { return nil, err } @@ -711,9 +707,9 @@ func (c *httpStorageClient) ComposeObject(ctx context.Context, req *composeObjec setClientHeader(call.Header()) var err error - retryCall := func() error { obj, err = call.Do(); return err } + retryCall := func(ctx context.Context) error { obj, err = call.Context(ctx).Do(); return err } - if err := run(ctx, retryCall, s.retry, s.idempotent, setRetryHeaderHTTP(call)); err != nil { + if err := run(ctx, retryCall, s.retry, s.idempotent); err != nil { return nil, err } return newObject(obj), nil @@ -723,7 +719,7 @@ func (c *httpStorageClient) RewriteObject(ctx context.Context, req *rewriteObjec rawObject := req.dstObject.attrs.toRawObject("") call := c.raw.Objects.Rewrite(req.srcObject.bucket, req.srcObject.name, req.dstObject.bucket, req.dstObject.name, rawObject) - call.Context(ctx).Projection("full") + call.Projection("full") if req.token != "" { call.RewriteToken(req.token) } @@ -759,9 +755,9 @@ func (c *httpStorageClient) RewriteObject(ctx context.Context, req *rewriteObjec var err error setClientHeader(call.Header()) - retryCall := func() error { res, err = call.Do(); return err } + retryCall := func(ctx context.Context) error { res, err = call.Context(ctx).Do(); return err } - if err := run(ctx, retryCall, s.retry, s.idempotent, setRetryHeaderHTTP(call)); err != nil { + if err := run(ctx, retryCall, s.retry, s.idempotent); err != nil { return nil, err } @@ -790,9 +786,10 @@ func (c *httpStorageClient) NewRangeReader(ctx context.Context, params *newRange func (c *httpStorageClient) 
newRangeReaderXML(ctx context.Context, params *newRangeReaderParams, s *settings) (r *Reader, err error) { u := &url.URL{ - Scheme: c.scheme, - Host: c.readHost, - Path: fmt.Sprintf("/%s/%s", params.bucket, params.object), + Scheme: c.scheme, + Host: c.xmlHost, + Path: fmt.Sprintf("/%s/%s", params.bucket, params.object), + RawPath: fmt.Sprintf("/%s/%s", params.bucket, url.PathEscape(params.object)), } verb := "GET" if params.length == 0 { @@ -802,7 +799,6 @@ func (c *httpStorageClient) newRangeReaderXML(ctx context.Context, params *newRa if err != nil { return nil, err } - req = req.WithContext(ctx) if s.userProject != "" { req.Header.Set("X-Goog-User-Project", s.userProject) @@ -812,8 +808,17 @@ func (c *httpStorageClient) newRangeReaderXML(ctx context.Context, params *newRa return nil, err } + // Set custom headers passed in via the context. This is only required for XML; + // for gRPC & JSON this is handled in the GAPIC and Apiary layers respectively. + ctxHeaders := callctx.HeadersFromContext(ctx) + for k, vals := range ctxHeaders { + for _, v := range vals { + req.Header.Add(k, v) + } + } + reopen := readerReopen(ctx, req.Header, params, s, - func() (*http.Response, error) { return c.hc.Do(req) }, + func(ctx context.Context) (*http.Response, error) { return c.hc.Do(req.WithContext(ctx)) }, func() error { return setConditionsHeaders(req.Header, params.conds) }, func() { req.URL.RawQuery = fmt.Sprintf("generation=%d", params.gen) }) @@ -828,7 +833,6 @@ func (c *httpStorageClient) newRangeReaderJSON(ctx context.Context, params *newR call := c.raw.Objects.Get(params.bucket, params.object) setClientHeader(call.Header()) - call.Context(ctx) call.Projection("full") if s.userProject != "" { @@ -839,7 +843,7 @@ func (c *httpStorageClient) newRangeReaderJSON(ctx context.Context, params *newR return nil, err } - reopen := readerReopen(ctx, call.Header(), params, s, func() (*http.Response, error) { return call.Download() }, + reopen := readerReopen(ctx, call.Header(), params, s, func(ctx context.Context) (*http.Response, error) { return call.Context(ctx).Download() }, func() error { return applyConds("NewReader", params.gen, params.conds, call) }, func() { call.Generation(params.gen) }) @@ -949,11 +953,11 @@ func (c *httpStorageClient) GetIamPolicy(ctx context.Context, resource string, v call.UserProject(s.userProject) } var rp *raw.Policy - err := run(ctx, func() error { + err := run(ctx, func(ctx context.Context) error { var err error rp, err = call.Context(ctx).Do() return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(call)) + }, s.retry, s.idempotent) if err != nil { return nil, err } @@ -970,10 +974,10 @@ func (c *httpStorageClient) SetIamPolicy(ctx context.Context, resource string, p call.UserProject(s.userProject) } - return run(ctx, func() error { + return run(ctx, func(ctx context.Context) error { _, err := call.Context(ctx).Do() return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(call)) + }, s.retry, s.idempotent) } func (c *httpStorageClient) TestIamPermissions(ctx context.Context, resource string, permissions []string, opts ...storageOption) ([]string, error) { @@ -984,11 +988,11 @@ func (c *httpStorageClient) TestIamPermissions(ctx context.Context, resource str call.UserProject(s.userProject) } var res *raw.TestIamPermissionsResponse - err := run(ctx, func() error { + err := run(ctx, func(ctx context.Context) error { var err error res, err = call.Context(ctx).Do() return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(call)) + }, s.retry, s.idempotent) if err != nil { 
return nil, err } @@ -1006,10 +1010,10 @@ func (c *httpStorageClient) GetHMACKey(ctx context.Context, project, accessID st var metadata *raw.HmacKeyMetadata var err error - if err := run(ctx, func() error { + if err := run(ctx, func(ctx context.Context) error { metadata, err = call.Context(ctx).Do() return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(call)); err != nil { + }, s.retry, s.idempotent); err != nil { return nil, err } hk := &raw.HmacKey{ @@ -1046,10 +1050,10 @@ func (c *httpStorageClient) ListHMACKeys(ctx context.Context, project, serviceAc } var resp *raw.HmacKeysMetadata - err = run(it.ctx, func() error { - resp, err = call.Context(it.ctx).Do() + err = run(it.ctx, func(ctx context.Context) error { + resp, err = call.Context(ctx).Do() return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(call)) + }, s.retry, s.idempotent) if err != nil { return "", err } @@ -1091,10 +1095,10 @@ func (c *httpStorageClient) UpdateHMACKey(ctx context.Context, project, serviceA var metadata *raw.HmacKeyMetadata var err error - if err := run(ctx, func() error { + if err := run(ctx, func(ctx context.Context) error { metadata, err = call.Context(ctx).Do() return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(call)); err != nil { + }, s.retry, s.idempotent); err != nil { return nil, err } hk := &raw.HmacKey{ @@ -1111,11 +1115,11 @@ func (c *httpStorageClient) CreateHMACKey(ctx context.Context, project, serviceA } var hk *raw.HmacKey - if err := run(ctx, func() error { + if err := run(ctx, func(ctx context.Context) error { h, err := call.Context(ctx).Do() hk = h return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(call)); err != nil { + }, s.retry, s.idempotent); err != nil { return nil, err } return toHMACKeyFromRaw(hk, true) @@ -1127,9 +1131,9 @@ func (c *httpStorageClient) DeleteHMACKey(ctx context.Context, project string, a if s.userProject != "" { call = call.UserProject(s.userProject) } - return run(ctx, func() error { + return run(ctx, func(ctx context.Context) error { return call.Context(ctx).Do() - }, s.retry, s.idempotent, setRetryHeaderHTTP(call)) + }, s.retry, s.idempotent) } // Notification methods. @@ -1148,10 +1152,10 @@ func (c *httpStorageClient) ListNotifications(ctx context.Context, bucket string call.UserProject(s.userProject) } var res *raw.Notifications - err = run(ctx, func() error { + err = run(ctx, func(ctx context.Context) error { res, err = call.Context(ctx).Do() return err - }, s.retry, true, setRetryHeaderHTTP(call)) + }, s.retry, true) if err != nil { return nil, err } @@ -1168,10 +1172,10 @@ func (c *httpStorageClient) CreateNotification(ctx context.Context, bucket strin call.UserProject(s.userProject) } var rn *raw.Notification - err = run(ctx, func() error { + err = run(ctx, func(ctx context.Context) error { rn, err = call.Context(ctx).Do() return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(call)) + }, s.retry, s.idempotent) if err != nil { return nil, err } @@ -1187,9 +1191,9 @@ func (c *httpStorageClient) DeleteNotification(ctx context.Context, bucket strin if s.userProject != "" { call.UserProject(s.userProject) } - return run(ctx, func() error { + return run(ctx, func(ctx context.Context) error { return call.Context(ctx).Do() - }, s.retry, s.idempotent, setRetryHeaderHTTP(call)) + }, s.retry, s.idempotent) } type httpReader struct { @@ -1238,7 +1242,7 @@ func setRangeReaderHeaders(h http.Header, params *newRangeReaderParams) error { // readerReopen initiates a Read with offset and length, assuming we // have already read seen bytes. 
func readerReopen(ctx context.Context, header http.Header, params *newRangeReaderParams, s *settings, - doDownload func() (*http.Response, error), applyConditions func() error, setGeneration func()) func(int64) (*http.Response, error) { + doDownload func(context.Context) (*http.Response, error), applyConditions func() error, setGeneration func()) func(int64) (*http.Response, error) { return func(seen int64) (*http.Response, error) { // If the context has already expired, return immediately without making a // call. @@ -1265,8 +1269,8 @@ func readerReopen(ctx context.Context, header http.Header, params *newRangeReade var err error var res *http.Response - err = run(ctx, func() error { - res, err = doDownload() + err = run(ctx, func(ctx context.Context) error { + res, err = doDownload(ctx) if err != nil { var e *googleapi.Error if errors.As(err, &e) { @@ -1320,7 +1324,7 @@ func readerReopen(ctx context.Context, header http.Header, params *newRangeReade params.gen = gen64 } return nil - }, s.retry, s.idempotent, setRetryHeaderHTTP(nil)) + }, s.retry, s.idempotent) if err != nil { return nil, err } @@ -1373,6 +1377,8 @@ func parseReadResponse(res *http.Response, params *newRangeReaderParams, reopen remain := res.ContentLength body := res.Body + // If the user requested zero bytes, explicitly close and remove the request + // body. if params.length == 0 { remain = 0 body.Close() diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary.go b/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary.go new file mode 100644 index 0000000000000..c6fd4b341ffa3 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary.go @@ -0,0 +1,210 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +package storage + +import ( + storagepb "cloud.google.com/go/storage/internal/apiv2/storagepb" + "google.golang.org/api/iterator" +) + +// BucketIterator manages a stream of *storagepb.Bucket. +type BucketIterator struct { + items []*storagepb.Bucket + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*storagepb.Bucket, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *BucketIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. 
Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *BucketIterator) Next() (*storagepb.Bucket, error) { + var item *storagepb.Bucket + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *BucketIterator) bufLen() int { + return len(it.items) +} + +func (it *BucketIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// HmacKeyMetadataIterator manages a stream of *storagepb.HmacKeyMetadata. +type HmacKeyMetadataIterator struct { + items []*storagepb.HmacKeyMetadata + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*storagepb.HmacKeyMetadata, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *HmacKeyMetadataIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *HmacKeyMetadataIterator) Next() (*storagepb.HmacKeyMetadata, error) { + var item *storagepb.HmacKeyMetadata + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *HmacKeyMetadataIterator) bufLen() int { + return len(it.items) +} + +func (it *HmacKeyMetadataIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// NotificationConfigIterator manages a stream of *storagepb.NotificationConfig. +type NotificationConfigIterator struct { + items []*storagepb.NotificationConfig + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*storagepb.NotificationConfig, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *NotificationConfigIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
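All four iterators generated in this file (BucketIterator, HmacKeyMetadataIterator, NotificationConfigIterator, ObjectIterator) expose the same Next/PageInfo surface and signal exhaustion with iterator.Done. A small, hypothetical drain helper showing the standard consumption loop; the generic interface and the in-memory sliceIter are illustration-only stand-ins so the sketch runs without a real client:

package main

import (
	"fmt"

	"google.golang.org/api/iterator"
)

// nexter matches the Next method the generated iterators share; only the
// element type differs between them.
type nexter[T any] interface {
	Next() (T, error)
}

// drain applies the usual iterator.Done termination pattern.
func drain[T any](it nexter[T]) ([]T, error) {
	var out []T
	for {
		v, err := it.Next()
		if err == iterator.Done {
			return out, nil
		}
		if err != nil {
			return nil, err
		}
		out = append(out, v)
	}
}

// sliceIter is a toy iterator so the example is runnable on its own.
type sliceIter struct{ items []string }

func (s *sliceIter) Next() (string, error) {
	if len(s.items) == 0 {
		return "", iterator.Done
	}
	v := s.items[0]
	s.items = s.items[1:]
	return v, nil
}

func main() {
	got, err := drain[string](&sliceIter{items: []string{"bucket-a", "bucket-b"}})
	fmt.Println(got, err)
}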
+func (it *NotificationConfigIterator) Next() (*storagepb.NotificationConfig, error) { + var item *storagepb.NotificationConfig + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *NotificationConfigIterator) bufLen() int { + return len(it.items) +} + +func (it *NotificationConfigIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// ObjectIterator manages a stream of *storagepb.Object. +type ObjectIterator struct { + items []*storagepb.Object + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*storagepb.Object, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *ObjectIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *ObjectIterator) Next() (*storagepb.Object, error) { + var item *storagepb.Object + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *ObjectIterator) bufLen() int { + return len(it.items) +} + +func (it *ObjectIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go b/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go index e33b5222ab387..8159589ea9be6 100644 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go @@ -17,14 +17,29 @@ // Package storage is an auto-generated package for the // Cloud Storage API. // -// Lets you store and retrieve potentially-large, immutable data objects. -// -// NOTE: This package is in alpha. It is not stable, and is likely to change. +// Stop. This folder is likely not what you are looking for. This folder +// contains protocol buffer definitions for an unreleased API for accessing +// Cloud Storage. Unless told otherwise by a Google Cloud representative, do +// not use any of the contents of this folder. If you would like to use Cloud +// Storage, please consult our official documentation (at +// https://cloud.google.com/storage/docs/apis) for details on our XML and +// JSON APIs, or else consider one of our client libraries (at +// https://cloud.google.com/storage/docs/reference/libraries). This API +// defined in this folder is unreleased and may shut off, break, or fail at +// any time for any users who are not registered as a part of a private +// preview program. // // # General documentation // -// For information about setting deadlines, reusing contexts, and more -// please visit https://pkg.go.dev/cloud.google.com/go. 
+// For information that is relevant for all client libraries please reference +// https://pkg.go.dev/cloud.google.com/go#pkg-overview. Some information on this +// page includes: +// +// - [Authentication and Authorization] +// - [Timeouts and Cancellation] +// - [Testing against Client Libraries] +// - [Debugging Client Libraries] +// - [Inspecting errors] // // # Example usage // @@ -61,15 +76,32 @@ // // TODO: Handle error. // } // defer c.Close() -// -// req := &storagepb.DeleteBucketRequest{ -// // TODO: Fill request struct fields. -// // See https://pkg.go.dev/cloud.google.com/go/storage/internal/apiv2/stubs#DeleteBucketRequest. -// } -// err = c.DeleteBucket(ctx, req) +// stream, err := c.BidiWriteObject(ctx) // if err != nil { // // TODO: Handle error. // } +// go func() { +// reqs := []*storagepb.BidiWriteObjectRequest{ +// // TODO: Create requests. +// } +// for _, req := range reqs { +// if err := stream.Send(req); err != nil { +// // TODO: Handle error. +// } +// } +// stream.CloseSend() +// }() +// for { +// resp, err := stream.Recv() +// if err == io.EOF { +// break +// } +// if err != nil { +// // TODO: handle error. +// } +// // TODO: Use resp. +// _ = resp +// } // // # Use of Context // @@ -78,18 +110,18 @@ // Individual methods on the client use the ctx given to them. // // To close the open connection, use the Close() method. +// +// [Authentication and Authorization]: https://pkg.go.dev/cloud.google.com/go#hdr-Authentication_and_Authorization +// [Timeouts and Cancellation]: https://pkg.go.dev/cloud.google.com/go#hdr-Timeouts_and_Cancellation +// [Testing against Client Libraries]: https://pkg.go.dev/cloud.google.com/go#hdr-Testing +// [Debugging Client Libraries]: https://pkg.go.dev/cloud.google.com/go#hdr-Debugging +// [Inspecting errors]: https://pkg.go.dev/cloud.google.com/go#hdr-Inspecting_errors package storage // import "cloud.google.com/go/storage/internal/apiv2" import ( "context" - "os" - "runtime" - "strconv" - "strings" - "unicode" "google.golang.org/api/option" - "google.golang.org/grpc/metadata" ) // For more information on implementing a client constructor hook, see @@ -106,27 +138,6 @@ func getVersionClient() string { return versionClient } -func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { - out, _ := metadata.FromOutgoingContext(ctx) - out = out.Copy() - for _, md := range mds { - for k, v := range md { - out[k] = append(out[k], v...) - } - } - return metadata.NewOutgoingContext(ctx, out) -} - -func checkDisableDeadlines() (bool, error) { - raw, ok := os.LookupEnv("GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE") - if !ok { - return false, nil - } - - b, err := strconv.ParseBool(raw) - return b, err -} - // DefaultAuthScopes reports the default set of authentication scopes to use with this package. func DefaultAuthScopes() []string { return []string{ @@ -137,40 +148,3 @@ func DefaultAuthScopes() []string { "https://www.googleapis.com/auth/devstorage.read_write", } } - -// versionGo returns the Go runtime version. The returned string -// has no whitespace, suitable for reporting in header. 
-func versionGo() string { - const develPrefix = "devel +" - - s := runtime.Version() - if strings.HasPrefix(s, develPrefix) { - s = s[len(develPrefix):] - if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { - s = s[:p] - } - return s - } - - notSemverRune := func(r rune) bool { - return !strings.ContainsRune("0123456789.", r) - } - - if strings.HasPrefix(s, "go1") { - s = s[2:] - var prerelease string - if p := strings.IndexFunc(s, notSemverRune); p >= 0 { - s, prerelease = s[:p], s[p:] - } - if strings.HasSuffix(s, ".") { - s += "0" - } else if strings.Count(s, ".") < 2 { - s += ".0" - } - if prerelease != "" { - s += "-" + prerelease - } - return s - } - return "UNKNOWN" -} diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json b/vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json index 8bbb154c073ae..56256bb2cca2f 100644 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json @@ -10,6 +10,11 @@ "grpc": { "libraryClient": "Client", "rpcs": { + "BidiWriteObject": { + "methods": [ + "BidiWriteObject" + ] + }, "CancelResumableWrite": { "methods": [ "CancelResumableWrite" @@ -120,6 +125,11 @@ "ReadObject" ] }, + "RestoreObject": { + "methods": [ + "RestoreObject" + ] + }, "RewriteObject": { "methods": [ "RewriteObject" diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/metadata.go b/vendor/cloud.google.com/go/storage/internal/apiv2/metadata.go deleted file mode 100644 index 6ff86c4fb49f9..0000000000000 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/metadata.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package storage - -import ( - "context" - - "google.golang.org/grpc/metadata" -) - -// InsertMetadata inserts the given gRPC metadata into the outgoing context. -func InsertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { - return insertMetadata(ctx, mds...) 
-} diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go b/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go index 811d60d489de7..64819950600cc 100644 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go @@ -23,16 +23,17 @@ import ( "net/url" "regexp" "strings" + "time" iampb "cloud.google.com/go/iam/apiv1/iampb" - storagepb "cloud.google.com/go/storage/internal/apiv2/stubs" + storagepb "cloud.google.com/go/storage/internal/apiv2/storagepb" gax "github.com/googleapis/gax-go/v2" "google.golang.org/api/iterator" "google.golang.org/api/option" "google.golang.org/api/option/internaloption" gtransport "google.golang.org/api/transport/grpc" "google.golang.org/grpc" - "google.golang.org/grpc/metadata" + "google.golang.org/grpc/codes" "google.golang.org/protobuf/proto" ) @@ -55,11 +56,13 @@ type CallOptions struct { ListNotificationConfigs []gax.CallOption ComposeObject []gax.CallOption DeleteObject []gax.CallOption + RestoreObject []gax.CallOption CancelResumableWrite []gax.CallOption GetObject []gax.CallOption ReadObject []gax.CallOption UpdateObject []gax.CallOption WriteObject []gax.CallOption + BidiWriteObject []gax.CallOption ListObjects []gax.CallOption RewriteObject []gax.CallOption StartResumableWrite []gax.CallOption @@ -86,36 +89,419 @@ func defaultGRPCClientOptions() []option.ClientOption { func defaultCallOptions() *CallOptions { return &CallOptions{ - DeleteBucket: []gax.CallOption{}, - GetBucket: []gax.CallOption{}, - CreateBucket: []gax.CallOption{}, - ListBuckets: []gax.CallOption{}, - LockBucketRetentionPolicy: []gax.CallOption{}, - GetIamPolicy: []gax.CallOption{}, - SetIamPolicy: []gax.CallOption{}, - TestIamPermissions: []gax.CallOption{}, - UpdateBucket: []gax.CallOption{}, - DeleteNotificationConfig: []gax.CallOption{}, - GetNotificationConfig: []gax.CallOption{}, - CreateNotificationConfig: []gax.CallOption{}, - ListNotificationConfigs: []gax.CallOption{}, - ComposeObject: []gax.CallOption{}, - DeleteObject: []gax.CallOption{}, - CancelResumableWrite: []gax.CallOption{}, - GetObject: []gax.CallOption{}, - ReadObject: []gax.CallOption{}, - UpdateObject: []gax.CallOption{}, - WriteObject: []gax.CallOption{}, - ListObjects: []gax.CallOption{}, - RewriteObject: []gax.CallOption{}, - StartResumableWrite: []gax.CallOption{}, - QueryWriteStatus: []gax.CallOption{}, - GetServiceAccount: []gax.CallOption{}, - CreateHmacKey: []gax.CallOption{}, - DeleteHmacKey: []gax.CallOption{}, - GetHmacKey: []gax.CallOption{}, - ListHmacKeys: []gax.CallOption{}, - UpdateHmacKey: []gax.CallOption{}, + DeleteBucket: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + GetBucket: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + CreateBucket: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * 
time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + ListBuckets: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + LockBucketRetentionPolicy: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + GetIamPolicy: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + SetIamPolicy: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + TestIamPermissions: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + UpdateBucket: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + DeleteNotificationConfig: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + GetNotificationConfig: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + CreateNotificationConfig: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + ListNotificationConfigs: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + ComposeObject: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + 
codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + DeleteObject: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + RestoreObject: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + CancelResumableWrite: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + GetObject: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + ReadObject: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + UpdateObject: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + WriteObject: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + BidiWriteObject: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + ListObjects: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + RewriteObject: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + StartResumableWrite: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + 
QueryWriteStatus: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + GetServiceAccount: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + CreateHmacKey: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + DeleteHmacKey: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + GetHmacKey: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + ListHmacKeys: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + UpdateHmacKey: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, } } @@ -139,11 +525,13 @@ type internalClient interface { ListNotificationConfigs(context.Context, *storagepb.ListNotificationConfigsRequest, ...gax.CallOption) *NotificationConfigIterator ComposeObject(context.Context, *storagepb.ComposeObjectRequest, ...gax.CallOption) (*storagepb.Object, error) DeleteObject(context.Context, *storagepb.DeleteObjectRequest, ...gax.CallOption) error + RestoreObject(context.Context, *storagepb.RestoreObjectRequest, ...gax.CallOption) (*storagepb.Object, error) CancelResumableWrite(context.Context, *storagepb.CancelResumableWriteRequest, ...gax.CallOption) (*storagepb.CancelResumableWriteResponse, error) GetObject(context.Context, *storagepb.GetObjectRequest, ...gax.CallOption) (*storagepb.Object, error) ReadObject(context.Context, *storagepb.ReadObjectRequest, ...gax.CallOption) (storagepb.Storage_ReadObjectClient, error) UpdateObject(context.Context, *storagepb.UpdateObjectRequest, ...gax.CallOption) (*storagepb.Object, error) WriteObject(context.Context, ...gax.CallOption) (storagepb.Storage_WriteObjectClient, error) + BidiWriteObject(context.Context, ...gax.CallOption) (storagepb.Storage_BidiWriteObjectClient, error) ListObjects(context.Context, *storagepb.ListObjectsRequest, ...gax.CallOption) *ObjectIterator RewriteObject(context.Context, 
*storagepb.RewriteObjectRequest, ...gax.CallOption) (*storagepb.RewriteResponse, error) StartResumableWrite(context.Context, *storagepb.StartResumableWriteRequest, ...gax.CallOption) (*storagepb.StartResumableWriteResponse, error) @@ -239,16 +627,16 @@ func (c *Client) LockBucketRetentionPolicy(ctx context.Context, req *storagepb.L // GetIamPolicy gets the IAM policy for a specified bucket or object. // The resource field in the request should be -// projects//buckets/ for a bucket or -// projects//buckets//objects/ for an object. +// projects/_/buckets/{bucket} for a bucket or +// projects/_/buckets/{bucket}/objects/{object} for an object. func (c *Client) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { return c.internalClient.GetIamPolicy(ctx, req, opts...) } // SetIamPolicy updates an IAM policy for the specified bucket or object. // The resource field in the request should be -// projects//buckets/ for a bucket or -// projects//buckets//objects/ for an object. +// projects/_/buckets/{bucket} for a bucket or +// projects/_/buckets/{bucket}/objects/{object} for an object. func (c *Client) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { return c.internalClient.SetIamPolicy(ctx, req, opts...) } @@ -256,8 +644,8 @@ func (c *Client) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyReques // TestIamPermissions tests a set of permissions on the given bucket or object to see which, if // any, are held by the caller. // The resource field in the request should be -// projects//buckets/ for a bucket or -// projects//buckets//objects/ for an object. +// projects/_/buckets/{bucket} for a bucket or +// projects/_/buckets/{bucket}/objects/{object} for an object. func (c *Client) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) { return c.internalClient.TestIamPermissions(ctx, req, opts...) } @@ -296,13 +684,29 @@ func (c *Client) ComposeObject(ctx context.Context, req *storagepb.ComposeObject return c.internalClient.ComposeObject(ctx, req, opts...) } -// DeleteObject deletes an object and its metadata. Deletions are permanent if versioning -// is not enabled for the bucket, or if the generation parameter is used. +// DeleteObject deletes an object and its metadata. +// +// Deletions are normally permanent when versioning is disabled or whenever +// the generation parameter is used. However, if soft delete is enabled for +// the bucket, deleted objects can be restored using RestoreObject until the +// soft delete retention period has passed. func (c *Client) DeleteObject(ctx context.Context, req *storagepb.DeleteObjectRequest, opts ...gax.CallOption) error { return c.internalClient.DeleteObject(ctx, req, opts...) } +// RestoreObject restores a soft-deleted object. +func (c *Client) RestoreObject(ctx context.Context, req *storagepb.RestoreObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) { + return c.internalClient.RestoreObject(ctx, req, opts...) +} + // CancelResumableWrite cancels an in-progress resumable upload. +// +// Any attempts to write to the resumable upload after cancelling the upload +// will fail. +// +// The behavior for currently in progress write operations is not guaranteed - +// they could either complete before the cancellation or fail if the +// cancellation completes first. 
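The reworded IAM doc comments above spell out the resource names these wrappers expect: projects/_/buckets/{bucket} for a bucket and projects/_/buckets/{bucket}/objects/{object} for an object. A tiny sketch constructing such requests with the iampb types this file already imports; the bucket, object, and permission values are placeholders:

package main

import (
	"fmt"

	iampb "cloud.google.com/go/iam/apiv1/iampb"
)

func main() {
	// Bucket-level policy lookup.
	bucketPolicy := &iampb.GetIamPolicyRequest{
		Resource: "projects/_/buckets/my-bucket",
	}
	// Object-level permission check.
	objectPerms := &iampb.TestIamPermissionsRequest{
		Resource:    "projects/_/buckets/my-bucket/objects/my-object",
		Permissions: []string{"storage.objects.get"},
	}
	fmt.Println(bucketPolicy.GetResource(), objectPerms.GetResource())
}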
func (c *Client) CancelResumableWrite(ctx context.Context, req *storagepb.CancelResumableWriteRequest, opts ...gax.CallOption) (*storagepb.CancelResumableWriteResponse, error) { return c.internalClient.CancelResumableWrite(ctx, req, opts...) } @@ -369,6 +773,9 @@ func (c *Client) UpdateObject(ctx context.Context, req *storagepb.UpdateObjectRe // incur a performance cost over resuming at the correct write offset. // This behavior can make client-side handling simpler in some cases. // +// Clients must only send data that is a multiple of 256 KiB per message, +// unless the object is being finished with finish_write set to true. +// // The service will not view the object as complete until the client has // sent a WriteObjectRequest with finish_write set to true. Sending any // requests on a stream after sending a request with finish_write set to @@ -379,10 +786,33 @@ func (c *Client) UpdateObject(ctx context.Context, req *storagepb.UpdateObjectRe // Attempting to resume an already finalized object will result in an OK // status, with a WriteObjectResponse containing the finalized object’s // metadata. +// +// Alternatively, the BidiWriteObject operation may be used to write an +// object with controls over flushing and the ability to fetch the ability to +// determine the current persisted size. func (c *Client) WriteObject(ctx context.Context, opts ...gax.CallOption) (storagepb.Storage_WriteObjectClient, error) { return c.internalClient.WriteObject(ctx, opts...) } +// BidiWriteObject stores a new object and metadata. +// +// This is similar to the WriteObject call with the added support for +// manual flushing of persisted state, and the ability to determine current +// persisted size without closing the stream. +// +// The client may specify one or both of the state_lookup and flush fields +// in each BidiWriteObjectRequest. If flush is specified, the data written +// so far will be persisted to storage. If state_lookup is specified, the +// service will respond with a BidiWriteObjectResponse that contains the +// persisted size. If both flush and state_lookup are specified, the flush +// will always occur before a state_lookup, so that both may be set in the +// same request and the returned state will be the state of the object +// post-flush. When the stream is closed, a BidiWriteObjectResponse will +// always be sent to the client, regardless of the value of state_lookup. +func (c *Client) BidiWriteObject(ctx context.Context, opts ...gax.CallOption) (storagepb.Storage_BidiWriteObjectClient, error) { + return c.internalClient.BidiWriteObject(ctx, opts...) +} + // ListObjects retrieves a list of objects matching the criteria. func (c *Client) ListObjects(ctx context.Context, req *storagepb.ListObjectsRequest, opts ...gax.CallOption) *ObjectIterator { return c.internalClient.ListObjects(ctx, req, opts...) @@ -455,9 +885,6 @@ type gRPCClient struct { // Connection pool of gRPC connections to the service. connPool gtransport.ConnPool - // flag to opt out of default deadlines via GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE - disableDeadlines bool - // Points back to the CallOptions field of the containing Client CallOptions **CallOptions @@ -465,7 +892,7 @@ type gRPCClient struct { client storagepb.StorageClient // The x-goog-* metadata to be sent with each request. - xGoogMetadata metadata.MD + xGoogHeaders []string } // NewClient creates a new storage client based on gRPC. 
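The new defaultCallOptions() block earlier in this file pins a concrete policy to each RPC: a 60s timeout on unary calls plus retries on DEADLINE_EXCEEDED and UNAVAILABLE with a 1s initial backoff, 60s cap, and 2x multiplier. A standalone sketch of that retryer built with the same gax-go calls and values, so the behaviour can be inspected in isolation:

package main

import (
	"fmt"
	"time"

	gax "github.com/googleapis/gax-go/v2"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	// Same shape and values as the generated call options.
	retryer := gax.OnCodes([]codes.Code{
		codes.DeadlineExceeded,
		codes.Unavailable,
	}, gax.Backoff{
		Initial:    1000 * time.Millisecond,
		Max:        60000 * time.Millisecond,
		Multiplier: 2.00,
	})

	// The retryer reports the pause before the next attempt and whether the
	// error should be retried at all.
	pause, retry := retryer.Retry(status.Error(codes.Unavailable, "backend unavailable"))
	fmt.Println(pause, retry)
}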
@@ -503,11 +930,6 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error clientOpts = append(clientOpts, hookOpts...) } - disableDeadlines, err := checkDisableDeadlines() - if err != nil { - return nil, err - } - connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...) if err != nil { return nil, err @@ -515,10 +937,9 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error client := Client{CallOptions: defaultCallOptions()} c := &gRPCClient{ - connPool: connPool, - disableDeadlines: disableDeadlines, - client: storagepb.NewStorageClient(connPool), - CallOptions: &client.CallOptions, + connPool: connPool, + client: storagepb.NewStorageClient(connPool), + CallOptions: &client.CallOptions, } c.setGoogleClientInfo() @@ -539,9 +960,9 @@ func (c *gRPCClient) Connection() *grpc.ClientConn { // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. func (c *gRPCClient) setGoogleClientInfo(keyval ...string) { - kv := append([]string{"gl-go", versionGo()}, keyval...) + kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) - c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) + c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} } // Close closes the connection to the API service. The user should invoke this when @@ -560,9 +981,10 @@ func (c *gRPCClient) DeleteBucket(ctx context.Context, req *storagepb.DeleteBuck routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) + hds := []string{"x-goog-request-params", routingHeaders} - ctx = insertMetadata(ctx, c.xGoogMetadata, md) + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) opts = append((*c.CallOptions).DeleteBucket[0:len((*c.CallOptions).DeleteBucket):len((*c.CallOptions).DeleteBucket)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error @@ -582,9 +1004,10 @@ func (c *gRPCClient) GetBucket(ctx context.Context, req *storagepb.GetBucketRequ routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) + hds := []string{"x-goog-request-params", routingHeaders} - ctx = insertMetadata(ctx, c.xGoogMetadata, md) + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) opts = append((*c.CallOptions).GetBucket[0:len((*c.CallOptions).GetBucket):len((*c.CallOptions).GetBucket)], opts...) var resp *storagepb.Bucket err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -611,9 +1034,10 @@ func (c *gRPCClient) CreateBucket(ctx context.Context, req *storagepb.CreateBuck routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) + hds := []string{"x-goog-request-params", routingHeaders} - ctx = insertMetadata(ctx, c.xGoogMetadata, md) + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) 
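Across these hunks the per-request metadata moves from a metadata.MD field to a flat []string of alternating keys and values (xGoogHeaders), and each RPC merges its routing header into the outgoing gRPC context via gax.InsertMetadataIntoOutgoingContext. A small sketch of that plumbing; the request-params value is a made-up placeholder:

package main

import (
	"context"
	"fmt"

	gax "github.com/googleapis/gax-go/v2"
	"google.golang.org/grpc/metadata"
)

func main() {
	// Mirrors setGoogleClientInfo: headers kept as alternating key/value strings.
	xGoogHeaders := []string{"x-goog-api-client", gax.XGoogHeader("gl-go", gax.GoVersion)}

	// Mirrors the per-RPC pattern: append the routing header, then stamp
	// everything onto the outgoing context in one call.
	hds := append(xGoogHeaders, "x-goog-request-params", "bucket=example-bucket")
	ctx := gax.InsertMetadataIntoOutgoingContext(context.Background(), hds...)

	md, _ := metadata.FromOutgoingContext(ctx)
	fmt.Println(md.Get("x-goog-api-client"), md.Get("x-goog-request-params"))
}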
opts = append((*c.CallOptions).CreateBucket[0:len((*c.CallOptions).CreateBucket):len((*c.CallOptions).CreateBucket)], opts...) var resp *storagepb.Bucket err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -637,9 +1061,10 @@ func (c *gRPCClient) ListBuckets(ctx context.Context, req *storagepb.ListBuckets routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) + hds := []string{"x-goog-request-params", routingHeaders} - ctx = insertMetadata(ctx, c.xGoogMetadata, md) + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) opts = append((*c.CallOptions).ListBuckets[0:len((*c.CallOptions).ListBuckets):len((*c.CallOptions).ListBuckets)], opts...) it := &BucketIterator{} req = proto.Clone(req).(*storagepb.ListBucketsRequest) @@ -691,9 +1116,10 @@ func (c *gRPCClient) LockBucketRetentionPolicy(ctx context.Context, req *storage routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) + hds := []string{"x-goog-request-params", routingHeaders} - ctx = insertMetadata(ctx, c.xGoogMetadata, md) + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) opts = append((*c.CallOptions).LockBucketRetentionPolicy[0:len((*c.CallOptions).LockBucketRetentionPolicy):len((*c.CallOptions).LockBucketRetentionPolicy)], opts...) var resp *storagepb.Bucket err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -720,9 +1146,10 @@ func (c *gRPCClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRe routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) + hds := []string{"x-goog-request-params", routingHeaders} - ctx = insertMetadata(ctx, c.xGoogMetadata, md) + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) opts = append((*c.CallOptions).GetIamPolicy[0:len((*c.CallOptions).GetIamPolicy):len((*c.CallOptions).GetIamPolicy)], opts...) var resp *iampb.Policy err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -749,9 +1176,10 @@ func (c *gRPCClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRe routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) + hds := []string{"x-goog-request-params", routingHeaders} - ctx = insertMetadata(ctx, c.xGoogMetadata, md) + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) opts = append((*c.CallOptions).SetIamPolicy[0:len((*c.CallOptions).SetIamPolicy):len((*c.CallOptions).SetIamPolicy)], opts...) 
var resp *iampb.Policy err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -778,9 +1206,10 @@ func (c *gRPCClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamP routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) + hds := []string{"x-goog-request-params", routingHeaders} - ctx = insertMetadata(ctx, c.xGoogMetadata, md) + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) opts = append((*c.CallOptions).TestIamPermissions[0:len((*c.CallOptions).TestIamPermissions):len((*c.CallOptions).TestIamPermissions)], opts...) var resp *iampb.TestIamPermissionsResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -804,9 +1233,10 @@ func (c *gRPCClient) UpdateBucket(ctx context.Context, req *storagepb.UpdateBuck routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) + hds := []string{"x-goog-request-params", routingHeaders} - ctx = insertMetadata(ctx, c.xGoogMetadata, md) + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) opts = append((*c.CallOptions).UpdateBucket[0:len((*c.CallOptions).UpdateBucket):len((*c.CallOptions).UpdateBucket)], opts...) var resp *storagepb.Bucket err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -830,9 +1260,10 @@ func (c *gRPCClient) DeleteNotificationConfig(ctx context.Context, req *storagep routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) + hds := []string{"x-goog-request-params", routingHeaders} - ctx = insertMetadata(ctx, c.xGoogMetadata, md) + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) opts = append((*c.CallOptions).DeleteNotificationConfig[0:len((*c.CallOptions).DeleteNotificationConfig):len((*c.CallOptions).DeleteNotificationConfig)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error @@ -852,9 +1283,10 @@ func (c *gRPCClient) GetNotificationConfig(ctx context.Context, req *storagepb.G routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) + hds := []string{"x-goog-request-params", routingHeaders} - ctx = insertMetadata(ctx, c.xGoogMetadata, md) + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) opts = append((*c.CallOptions).GetNotificationConfig[0:len((*c.CallOptions).GetNotificationConfig):len((*c.CallOptions).GetNotificationConfig)], opts...) 
var resp *storagepb.NotificationConfig err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -878,9 +1310,10 @@ func (c *gRPCClient) CreateNotificationConfig(ctx context.Context, req *storagep routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) + hds := []string{"x-goog-request-params", routingHeaders} - ctx = insertMetadata(ctx, c.xGoogMetadata, md) + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) opts = append((*c.CallOptions).CreateNotificationConfig[0:len((*c.CallOptions).CreateNotificationConfig):len((*c.CallOptions).CreateNotificationConfig)], opts...) var resp *storagepb.NotificationConfig err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -904,9 +1337,10 @@ func (c *gRPCClient) ListNotificationConfigs(ctx context.Context, req *storagepb routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) + hds := []string{"x-goog-request-params", routingHeaders} - ctx = insertMetadata(ctx, c.xGoogMetadata, md) + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) opts = append((*c.CallOptions).ListNotificationConfigs[0:len((*c.CallOptions).ListNotificationConfigs):len((*c.CallOptions).ListNotificationConfigs)], opts...) it := &NotificationConfigIterator{} req = proto.Clone(req).(*storagepb.ListNotificationConfigsRequest) @@ -958,9 +1392,10 @@ func (c *gRPCClient) ComposeObject(ctx context.Context, req *storagepb.ComposeOb routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) + hds := []string{"x-goog-request-params", routingHeaders} - ctx = insertMetadata(ctx, c.xGoogMetadata, md) + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) opts = append((*c.CallOptions).ComposeObject[0:len((*c.CallOptions).ComposeObject):len((*c.CallOptions).ComposeObject)], opts...) var resp *storagepb.Object err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -984,9 +1419,10 @@ func (c *gRPCClient) DeleteObject(ctx context.Context, req *storagepb.DeleteObje routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) + hds := []string{"x-goog-request-params", routingHeaders} - ctx = insertMetadata(ctx, c.xGoogMetadata, md) + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) opts = append((*c.CallOptions).DeleteObject[0:len((*c.CallOptions).DeleteObject):len((*c.CallOptions).DeleteObject)], opts...) 
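Each hunk above also preserves the opts = append((*c.CallOptions).X[0:len:len], opts...) shape. The three-index (full) slice expression caps the capacity at the length, so appending per-call options is forced to allocate a fresh backing array instead of writing into the client's shared defaults. A small illustration of the idiom, with purely illustrative names:

package main

import "fmt"

type callOption string

func main() {
    // defaults stands in for a shared (*c.CallOptions).SomeMethod slice that has
    // spare capacity behind it.
    defaults := make([]callOption, 2, 8)
    defaults[0], defaults[1] = "retry-policy", "timeout-30s"

    perCall := []callOption{"timeout-5s"}

    // A plain append(defaults, perCall...) could reuse the spare capacity and
    // overwrite state seen by other callers. Capping capacity forces a copy.
    opts := append(defaults[0:len(defaults):len(defaults)], perCall...)

    fmt.Println(defaults) // [retry-policy timeout-30s] -- untouched
    fmt.Println(opts)     // [retry-policy timeout-30s timeout-5s]
}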
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error @@ -996,6 +1432,33 @@ func (c *gRPCClient) DeleteObject(ctx context.Context, req *storagepb.DeleteObje return err } +func (c *gRPCClient) RestoreObject(ctx context.Context, req *storagepb.RestoreObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + hds := []string{"x-goog-request-params", routingHeaders} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).RestoreObject[0:len((*c.CallOptions).RestoreObject):len((*c.CallOptions).RestoreObject)], opts...) + var resp *storagepb.Object + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.RestoreObject(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + func (c *gRPCClient) CancelResumableWrite(ctx context.Context, req *storagepb.CancelResumableWriteRequest, opts ...gax.CallOption) (*storagepb.CancelResumableWriteResponse, error) { routingHeaders := "" routingHeadersMap := make(map[string]string) @@ -1006,9 +1469,10 @@ func (c *gRPCClient) CancelResumableWrite(ctx context.Context, req *storagepb.Ca routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) + hds := []string{"x-goog-request-params", routingHeaders} - ctx = insertMetadata(ctx, c.xGoogMetadata, md) + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) opts = append((*c.CallOptions).CancelResumableWrite[0:len((*c.CallOptions).CancelResumableWrite):len((*c.CallOptions).CancelResumableWrite)], opts...) var resp *storagepb.CancelResumableWriteResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -1032,9 +1496,10 @@ func (c *gRPCClient) GetObject(ctx context.Context, req *storagepb.GetObjectRequ routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) + hds := []string{"x-goog-request-params", routingHeaders} - ctx = insertMetadata(ctx, c.xGoogMetadata, md) + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) opts = append((*c.CallOptions).GetObject[0:len((*c.CallOptions).GetObject):len((*c.CallOptions).GetObject)], opts...) 
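RestoreObject, added above, is a plain unary method on the gRPC transport. A hedged sketch of invoking it follows; it assumes the exported apiv2 Client exposes a matching RestoreObject wrapper (the usual GAPIC layout) and that buckets are addressed by the v2 resource format, neither of which is shown in this hunk.

// Sketch only: apiv2 is internal to cloud.google.com/go/storage, so code like
// this would live inside that module rather than in user programs.
package storage

import (
    "context"

    gapic "cloud.google.com/go/storage/internal/apiv2"
    "cloud.google.com/go/storage/internal/apiv2/storagepb"
)

// restoreGeneration restores one soft-deleted generation of an object.
func restoreGeneration(ctx context.Context, c *gapic.Client, bucket, object string, gen int64) (*storagepb.Object, error) {
    req := &storagepb.RestoreObjectRequest{
        // Assumed v2 resource format, e.g. "projects/_/buckets/my-bucket".
        Bucket:     bucket,
        Object:     object,
        Generation: gen,
    }
    return c.RestoreObject(ctx, req)
}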
var resp *storagepb.Object err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -1058,9 +1523,10 @@ func (c *gRPCClient) ReadObject(ctx context.Context, req *storagepb.ReadObjectRe routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) + hds := []string{"x-goog-request-params", routingHeaders} - ctx = insertMetadata(ctx, c.xGoogMetadata, md) + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) opts = append((*c.CallOptions).ReadObject[0:len((*c.CallOptions).ReadObject):len((*c.CallOptions).ReadObject)], opts...) var resp storagepb.Storage_ReadObjectClient err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -1084,9 +1550,10 @@ func (c *gRPCClient) UpdateObject(ctx context.Context, req *storagepb.UpdateObje routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) + hds := []string{"x-goog-request-params", routingHeaders} - ctx = insertMetadata(ctx, c.xGoogMetadata, md) + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) opts = append((*c.CallOptions).UpdateObject[0:len((*c.CallOptions).UpdateObject):len((*c.CallOptions).UpdateObject)], opts...) var resp *storagepb.Object err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -1101,7 +1568,7 @@ func (c *gRPCClient) UpdateObject(ctx context.Context, req *storagepb.UpdateObje } func (c *gRPCClient) WriteObject(ctx context.Context, opts ...gax.CallOption) (storagepb.Storage_WriteObjectClient, error) { - ctx = insertMetadata(ctx, c.xGoogMetadata) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, c.xGoogHeaders...) var resp storagepb.Storage_WriteObjectClient opts = append((*c.CallOptions).WriteObject[0:len((*c.CallOptions).WriteObject):len((*c.CallOptions).WriteObject)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -1115,6 +1582,21 @@ func (c *gRPCClient) WriteObject(ctx context.Context, opts ...gax.CallOption) (s return resp, nil } +func (c *gRPCClient) BidiWriteObject(ctx context.Context, opts ...gax.CallOption) (storagepb.Storage_BidiWriteObjectClient, error) { + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, c.xGoogHeaders...) + var resp storagepb.Storage_BidiWriteObjectClient + opts = append((*c.CallOptions).BidiWriteObject[0:len((*c.CallOptions).BidiWriteObject):len((*c.CallOptions).BidiWriteObject)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.BidiWriteObject(ctx, settings.GRPC...) + return err + }, opts...) 
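ReadObject, in the hunk above, is a server-streaming call: it hands back a storagepb.Storage_ReadObjectClient, and the object body arrives as a sequence of ReadObjectResponse messages. A hedged drain loop is sketched below; it assumes the chunk bytes live in ChecksummedData's content field, which this diff does not show.

package storage

import (
    "io"

    "cloud.google.com/go/storage/internal/apiv2/storagepb"
)

// readAll concatenates every chunk delivered on an already-opened ReadObject stream.
func readAll(stream storagepb.Storage_ReadObjectClient) ([]byte, error) {
    var buf []byte
    for {
        resp, err := stream.Recv()
        if err == io.EOF {
            return buf, nil // server closed the stream: object fully delivered
        }
        if err != nil {
            return nil, err
        }
        if cd := resp.GetChecksummedData(); cd != nil {
            buf = append(buf, cd.GetContent()...) // content field assumed
        }
    }
}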
+ if err != nil { + return nil, err + } + return resp, nil +} + func (c *gRPCClient) ListObjects(ctx context.Context, req *storagepb.ListObjectsRequest, opts ...gax.CallOption) *ObjectIterator { routingHeaders := "" routingHeadersMap := make(map[string]string) @@ -1125,9 +1607,10 @@ func (c *gRPCClient) ListObjects(ctx context.Context, req *storagepb.ListObjects routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) + hds := []string{"x-goog-request-params", routingHeaders} - ctx = insertMetadata(ctx, c.xGoogMetadata, md) + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) opts = append((*c.CallOptions).ListObjects[0:len((*c.CallOptions).ListObjects):len((*c.CallOptions).ListObjects)], opts...) it := &ObjectIterator{} req = proto.Clone(req).(*storagepb.ListObjectsRequest) @@ -1182,9 +1665,10 @@ func (c *gRPCClient) RewriteObject(ctx context.Context, req *storagepb.RewriteOb routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) + hds := []string{"x-goog-request-params", routingHeaders} - ctx = insertMetadata(ctx, c.xGoogMetadata, md) + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) opts = append((*c.CallOptions).RewriteObject[0:len((*c.CallOptions).RewriteObject):len((*c.CallOptions).RewriteObject)], opts...) var resp *storagepb.RewriteResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -1208,9 +1692,10 @@ func (c *gRPCClient) StartResumableWrite(ctx context.Context, req *storagepb.Sta routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) + hds := []string{"x-goog-request-params", routingHeaders} - ctx = insertMetadata(ctx, c.xGoogMetadata, md) + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) opts = append((*c.CallOptions).StartResumableWrite[0:len((*c.CallOptions).StartResumableWrite):len((*c.CallOptions).StartResumableWrite)], opts...) var resp *storagepb.StartResumableWriteResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -1234,9 +1719,10 @@ func (c *gRPCClient) QueryWriteStatus(ctx context.Context, req *storagepb.QueryW routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) + hds := []string{"x-goog-request-params", routingHeaders} - ctx = insertMetadata(ctx, c.xGoogMetadata, md) + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) opts = append((*c.CallOptions).QueryWriteStatus[0:len((*c.CallOptions).QueryWriteStatus):len((*c.CallOptions).QueryWriteStatus)], opts...) 
var resp *storagepb.QueryWriteStatusResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -1260,9 +1746,10 @@ func (c *gRPCClient) GetServiceAccount(ctx context.Context, req *storagepb.GetSe routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) + hds := []string{"x-goog-request-params", routingHeaders} - ctx = insertMetadata(ctx, c.xGoogMetadata, md) + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) opts = append((*c.CallOptions).GetServiceAccount[0:len((*c.CallOptions).GetServiceAccount):len((*c.CallOptions).GetServiceAccount)], opts...) var resp *storagepb.ServiceAccount err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -1286,9 +1773,10 @@ func (c *gRPCClient) CreateHmacKey(ctx context.Context, req *storagepb.CreateHma routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) + hds := []string{"x-goog-request-params", routingHeaders} - ctx = insertMetadata(ctx, c.xGoogMetadata, md) + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) opts = append((*c.CallOptions).CreateHmacKey[0:len((*c.CallOptions).CreateHmacKey):len((*c.CallOptions).CreateHmacKey)], opts...) var resp *storagepb.CreateHmacKeyResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -1312,9 +1800,10 @@ func (c *gRPCClient) DeleteHmacKey(ctx context.Context, req *storagepb.DeleteHma routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) + hds := []string{"x-goog-request-params", routingHeaders} - ctx = insertMetadata(ctx, c.xGoogMetadata, md) + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) opts = append((*c.CallOptions).DeleteHmacKey[0:len((*c.CallOptions).DeleteHmacKey):len((*c.CallOptions).DeleteHmacKey)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error @@ -1334,9 +1823,10 @@ func (c *gRPCClient) GetHmacKey(ctx context.Context, req *storagepb.GetHmacKeyRe routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) + hds := []string{"x-goog-request-params", routingHeaders} - ctx = insertMetadata(ctx, c.xGoogMetadata, md) + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) opts = append((*c.CallOptions).GetHmacKey[0:len((*c.CallOptions).GetHmacKey):len((*c.CallOptions).GetHmacKey)], opts...) 
var resp *storagepb.HmacKeyMetadata err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -1360,9 +1850,10 @@ func (c *gRPCClient) ListHmacKeys(ctx context.Context, req *storagepb.ListHmacKe routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) + hds := []string{"x-goog-request-params", routingHeaders} - ctx = insertMetadata(ctx, c.xGoogMetadata, md) + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) opts = append((*c.CallOptions).ListHmacKeys[0:len((*c.CallOptions).ListHmacKeys):len((*c.CallOptions).ListHmacKeys)], opts...) it := &HmacKeyMetadataIterator{} req = proto.Clone(req).(*storagepb.ListHmacKeysRequest) @@ -1414,9 +1905,10 @@ func (c *gRPCClient) UpdateHmacKey(ctx context.Context, req *storagepb.UpdateHma routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) + hds := []string{"x-goog-request-params", routingHeaders} - ctx = insertMetadata(ctx, c.xGoogMetadata, md) + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) opts = append((*c.CallOptions).UpdateHmacKey[0:len((*c.CallOptions).UpdateHmacKey):len((*c.CallOptions).UpdateHmacKey)], opts...) var resp *storagepb.HmacKeyMetadata err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -1429,191 +1921,3 @@ func (c *gRPCClient) UpdateHmacKey(ctx context.Context, req *storagepb.UpdateHma } return resp, nil } - -// BucketIterator manages a stream of *storagepb.Bucket. -type BucketIterator struct { - items []*storagepb.Bucket - pageInfo *iterator.PageInfo - nextFunc func() error - - // Response is the raw response for the current page. - // It must be cast to the RPC response type. - // Calling Next() or InternalFetch() updates this value. - Response interface{} - - // InternalFetch is for use by the Google Cloud Libraries only. - // It is not part of the stable interface of this package. - // - // InternalFetch returns results from a single call to the underlying RPC. - // The number of results is no greater than pageSize. - // If there are no more results, nextPageToken is empty and err is nil. - InternalFetch func(pageSize int, pageToken string) (results []*storagepb.Bucket, nextPageToken string, err error) -} - -// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. -func (it *BucketIterator) PageInfo() *iterator.PageInfo { - return it.pageInfo -} - -// Next returns the next result. Its second return value is iterator.Done if there are no more -// results. Once Next returns Done, all subsequent calls will return Done. -func (it *BucketIterator) Next() (*storagepb.Bucket, error) { - var item *storagepb.Bucket - if err := it.nextFunc(); err != nil { - return item, err - } - item = it.items[0] - it.items = it.items[1:] - return item, nil -} - -func (it *BucketIterator) bufLen() int { - return len(it.items) -} - -func (it *BucketIterator) takeBuf() interface{} { - b := it.items - it.items = nil - return b -} - -// HmacKeyMetadataIterator manages a stream of *storagepb.HmacKeyMetadata. 
-type HmacKeyMetadataIterator struct { - items []*storagepb.HmacKeyMetadata - pageInfo *iterator.PageInfo - nextFunc func() error - - // Response is the raw response for the current page. - // It must be cast to the RPC response type. - // Calling Next() or InternalFetch() updates this value. - Response interface{} - - // InternalFetch is for use by the Google Cloud Libraries only. - // It is not part of the stable interface of this package. - // - // InternalFetch returns results from a single call to the underlying RPC. - // The number of results is no greater than pageSize. - // If there are no more results, nextPageToken is empty and err is nil. - InternalFetch func(pageSize int, pageToken string) (results []*storagepb.HmacKeyMetadata, nextPageToken string, err error) -} - -// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. -func (it *HmacKeyMetadataIterator) PageInfo() *iterator.PageInfo { - return it.pageInfo -} - -// Next returns the next result. Its second return value is iterator.Done if there are no more -// results. Once Next returns Done, all subsequent calls will return Done. -func (it *HmacKeyMetadataIterator) Next() (*storagepb.HmacKeyMetadata, error) { - var item *storagepb.HmacKeyMetadata - if err := it.nextFunc(); err != nil { - return item, err - } - item = it.items[0] - it.items = it.items[1:] - return item, nil -} - -func (it *HmacKeyMetadataIterator) bufLen() int { - return len(it.items) -} - -func (it *HmacKeyMetadataIterator) takeBuf() interface{} { - b := it.items - it.items = nil - return b -} - -// NotificationConfigIterator manages a stream of *storagepb.NotificationConfig. -type NotificationConfigIterator struct { - items []*storagepb.NotificationConfig - pageInfo *iterator.PageInfo - nextFunc func() error - - // Response is the raw response for the current page. - // It must be cast to the RPC response type. - // Calling Next() or InternalFetch() updates this value. - Response interface{} - - // InternalFetch is for use by the Google Cloud Libraries only. - // It is not part of the stable interface of this package. - // - // InternalFetch returns results from a single call to the underlying RPC. - // The number of results is no greater than pageSize. - // If there are no more results, nextPageToken is empty and err is nil. - InternalFetch func(pageSize int, pageToken string) (results []*storagepb.NotificationConfig, nextPageToken string, err error) -} - -// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. -func (it *NotificationConfigIterator) PageInfo() *iterator.PageInfo { - return it.pageInfo -} - -// Next returns the next result. Its second return value is iterator.Done if there are no more -// results. Once Next returns Done, all subsequent calls will return Done. -func (it *NotificationConfigIterator) Next() (*storagepb.NotificationConfig, error) { - var item *storagepb.NotificationConfig - if err := it.nextFunc(); err != nil { - return item, err - } - item = it.items[0] - it.items = it.items[1:] - return item, nil -} - -func (it *NotificationConfigIterator) bufLen() int { - return len(it.items) -} - -func (it *NotificationConfigIterator) takeBuf() interface{} { - b := it.items - it.items = nil - return b -} - -// ObjectIterator manages a stream of *storagepb.Object. -type ObjectIterator struct { - items []*storagepb.Object - pageInfo *iterator.PageInfo - nextFunc func() error - - // Response is the raw response for the current page. 
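The iterator types removed above (BucketIterator, HmacKeyMetadataIterator, NotificationConfigIterator, ObjectIterator) all follow the google.golang.org/api/iterator contract spelled out in their Next documentation. A minimal consumption sketch, assumed to sit in the same package as the generated iterators:

package storage

import (
    "fmt"

    "google.golang.org/api/iterator"
)

// drainBuckets prints each bucket yielded by a BucketIterator, stopping cleanly
// once Next reports iterator.Done, as the doc comment above describes.
func drainBuckets(it *BucketIterator) error {
    for {
        b, err := it.Next()
        if err == iterator.Done {
            return nil
        }
        if err != nil {
            return err
        }
        fmt.Println(b.GetName())
    }
}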
- // It must be cast to the RPC response type. - // Calling Next() or InternalFetch() updates this value. - Response interface{} - - // InternalFetch is for use by the Google Cloud Libraries only. - // It is not part of the stable interface of this package. - // - // InternalFetch returns results from a single call to the underlying RPC. - // The number of results is no greater than pageSize. - // If there are no more results, nextPageToken is empty and err is nil. - InternalFetch func(pageSize int, pageToken string) (results []*storagepb.Object, nextPageToken string, err error) -} - -// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. -func (it *ObjectIterator) PageInfo() *iterator.PageInfo { - return it.pageInfo -} - -// Next returns the next result. Its second return value is iterator.Done if there are no more -// results. Once Next returns Done, all subsequent calls will return Done. -func (it *ObjectIterator) Next() (*storagepb.Object, error) { - var item *storagepb.Object - if err := it.nextFunc(); err != nil { - return item, err - } - item = it.items[0] - it.items = it.items[1:] - return item, nil -} - -func (it *ObjectIterator) bufLen() int { - return len(it.items) -} - -func (it *ObjectIterator) takeBuf() interface{} { - b := it.items - it.items = nil - return b -} diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/stubs/storage.pb.go b/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go similarity index 66% rename from vendor/cloud.google.com/go/storage/internal/apiv2/stubs/storage.pb.go rename to vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go index 32d4ec43c59bd..3486fd1533ec7 100644 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/stubs/storage.pb.go +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go @@ -1,4 +1,4 @@ -// Copyright 2022 Google LLC +// Copyright 2023 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,11 +14,11 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.26.0 -// protoc v3.21.9 +// protoc-gen-go v1.31.0 +// protoc v4.23.2 // source: google/storage/v2/storage.proto -package storage +package storagepb import ( context "context" @@ -177,7 +177,7 @@ func (x ServiceConstants_Values) Number() protoreflect.EnumNumber { // Deprecated: Use ServiceConstants_Values.Descriptor instead. func (ServiceConstants_Values) EnumDescriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{39, 0} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{42, 0} } // Request message for DeleteBucket. @@ -337,9 +337,12 @@ type CreateBucketRequest struct { // Required. The project to which this bucket will belong. Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` // Properties of the new bucket being inserted. - // The project and name of the bucket are specified in the parent and - // bucket_id fields, respectively. Populating those fields in `bucket` will - // result in an error. + // The name of the bucket is specified in the `bucket_id` field. Populating + // `bucket.name` field will result in an error. + // The project of the bucket must be specified in the `bucket.project` field. + // This field must be in `projects/{projectIdentifier}` format, + // {projectIdentifier} can be the project ID or project number. 
The `parent` + // field must be either empty or `projects/_`. Bucket *Bucket `protobuf:"bytes,2,opt,name=bucket,proto3" json:"bucket,omitempty"` // Required. The ID to use for this bucket, which will become the final // component of the bucket's resource name. For example, the value `foo` might @@ -663,8 +666,6 @@ type UpdateBucketRequest struct { // may accidentally reset the new field's value. // // Not specifying any fields is an error. - // Not specifying a field while setting that field to a non-default value is - // an error. UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,6,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` } @@ -1276,6 +1277,137 @@ func (x *DeleteObjectRequest) GetCommonObjectRequestParams() *CommonObjectReques return nil } +// Message for restoring an object. +// `bucket`, `object`, and `generation` **must** be set. +type RestoreObjectRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Name of the bucket in which the object resides. + Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` + // Required. The name of the object to restore. + Object string `protobuf:"bytes,2,opt,name=object,proto3" json:"object,omitempty"` + // Required. The specific revision of the object to restore. + Generation int64 `protobuf:"varint,3,opt,name=generation,proto3" json:"generation,omitempty"` + // Makes the operation conditional on whether the object's current generation + // matches the given value. Setting to 0 makes the operation succeed only if + // there are no live versions of the object. + IfGenerationMatch *int64 `protobuf:"varint,4,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"` + // Makes the operation conditional on whether the object's live generation + // does not match the given value. If no live object exists, the precondition + // fails. Setting to 0 makes the operation succeed only if there is a live + // version of the object. + IfGenerationNotMatch *int64 `protobuf:"varint,5,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"` + // Makes the operation conditional on whether the object's current + // metageneration matches the given value. + IfMetagenerationMatch *int64 `protobuf:"varint,6,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"` + // Makes the operation conditional on whether the object's current + // metageneration does not match the given value. + IfMetagenerationNotMatch *int64 `protobuf:"varint,7,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"` + // If false or unset, the bucket's default object ACL will be used. + // If true, copy the source object's access controls. + // Return an error if bucket has UBLA enabled. + CopySourceAcl *bool `protobuf:"varint,9,opt,name=copy_source_acl,json=copySourceAcl,proto3,oneof" json:"copy_source_acl,omitempty"` + // A set of parameters common to Storage API requests concerning an object. 
+ CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,8,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"` +} + +func (x *RestoreObjectRequest) Reset() { + *x = RestoreObjectRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RestoreObjectRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RestoreObjectRequest) ProtoMessage() {} + +func (x *RestoreObjectRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RestoreObjectRequest.ProtoReflect.Descriptor instead. +func (*RestoreObjectRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{14} +} + +func (x *RestoreObjectRequest) GetBucket() string { + if x != nil { + return x.Bucket + } + return "" +} + +func (x *RestoreObjectRequest) GetObject() string { + if x != nil { + return x.Object + } + return "" +} + +func (x *RestoreObjectRequest) GetGeneration() int64 { + if x != nil { + return x.Generation + } + return 0 +} + +func (x *RestoreObjectRequest) GetIfGenerationMatch() int64 { + if x != nil && x.IfGenerationMatch != nil { + return *x.IfGenerationMatch + } + return 0 +} + +func (x *RestoreObjectRequest) GetIfGenerationNotMatch() int64 { + if x != nil && x.IfGenerationNotMatch != nil { + return *x.IfGenerationNotMatch + } + return 0 +} + +func (x *RestoreObjectRequest) GetIfMetagenerationMatch() int64 { + if x != nil && x.IfMetagenerationMatch != nil { + return *x.IfMetagenerationMatch + } + return 0 +} + +func (x *RestoreObjectRequest) GetIfMetagenerationNotMatch() int64 { + if x != nil && x.IfMetagenerationNotMatch != nil { + return *x.IfMetagenerationNotMatch + } + return 0 +} + +func (x *RestoreObjectRequest) GetCopySourceAcl() bool { + if x != nil && x.CopySourceAcl != nil { + return *x.CopySourceAcl + } + return false +} + +func (x *RestoreObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams { + if x != nil { + return x.CommonObjectRequestParams + } + return nil +} + // Message for canceling an in-progress resumable upload. // `upload_id` **must** be set. 
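The if_generation_match family of fields above is proto3 optional, so the generated Go fields are pointers: a nil pointer means no precondition is sent at all, while an explicit 0 carries the special meaning in the comments (for if_generation_match, succeed only if there are no live versions). A short sketch of that distinction, assumed to live inside the module where storagepb is importable:

package storage

import (
    "fmt"

    "cloud.google.com/go/storage/internal/apiv2/storagepb"
    "google.golang.org/protobuf/proto"
)

func preconditionPresence() {
    req := &storagepb.RestoreObjectRequest{Bucket: "projects/_/buckets/b", Object: "o", Generation: 123}

    // Unset pointer: no generation precondition at all.
    fmt.Println(req.IfGenerationMatch == nil) // true
    fmt.Println(req.GetIfGenerationMatch())   // 0 -- the getter cannot distinguish unset from zero

    // Explicit zero: restore only if there is currently no live version.
    req.IfGenerationMatch = proto.Int64(0)
    fmt.Println(req.IfGenerationMatch == nil) // false
}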
type CancelResumableWriteRequest struct { @@ -1291,7 +1423,7 @@ type CancelResumableWriteRequest struct { func (x *CancelResumableWriteRequest) Reset() { *x = CancelResumableWriteRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[14] + mi := &file_google_storage_v2_storage_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1304,7 +1436,7 @@ func (x *CancelResumableWriteRequest) String() string { func (*CancelResumableWriteRequest) ProtoMessage() {} func (x *CancelResumableWriteRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[14] + mi := &file_google_storage_v2_storage_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1317,7 +1449,7 @@ func (x *CancelResumableWriteRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use CancelResumableWriteRequest.ProtoReflect.Descriptor instead. func (*CancelResumableWriteRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{14} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{15} } func (x *CancelResumableWriteRequest) GetUploadId() string { @@ -1338,7 +1470,7 @@ type CancelResumableWriteResponse struct { func (x *CancelResumableWriteResponse) Reset() { *x = CancelResumableWriteResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[15] + mi := &file_google_storage_v2_storage_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1351,7 +1483,7 @@ func (x *CancelResumableWriteResponse) String() string { func (*CancelResumableWriteResponse) ProtoMessage() {} func (x *CancelResumableWriteResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[15] + mi := &file_google_storage_v2_storage_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1364,7 +1496,7 @@ func (x *CancelResumableWriteResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use CancelResumableWriteResponse.ProtoReflect.Descriptor instead. func (*CancelResumableWriteResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{15} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{16} } // Request message for ReadObject. @@ -1426,7 +1558,7 @@ type ReadObjectRequest struct { func (x *ReadObjectRequest) Reset() { *x = ReadObjectRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[16] + mi := &file_google_storage_v2_storage_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1439,7 +1571,7 @@ func (x *ReadObjectRequest) String() string { func (*ReadObjectRequest) ProtoMessage() {} func (x *ReadObjectRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[16] + mi := &file_google_storage_v2_storage_proto_msgTypes[17] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1452,7 +1584,7 @@ func (x *ReadObjectRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ReadObjectRequest.ProtoReflect.Descriptor instead. 
func (*ReadObjectRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{16} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{17} } func (x *ReadObjectRequest) GetBucket() string { @@ -1545,6 +1677,8 @@ type GetObjectRequest struct { // If present, selects a specific revision of this object (as opposed to the // latest version, the default). Generation int64 `protobuf:"varint,3,opt,name=generation,proto3" json:"generation,omitempty"` + // If true, return the soft-deleted version of this object. + SoftDeleted *bool `protobuf:"varint,11,opt,name=soft_deleted,json=softDeleted,proto3,oneof" json:"soft_deleted,omitempty"` // Makes the operation conditional on whether the object's current generation // matches the given value. Setting to 0 makes the operation succeed only if // there are no live versions of the object. @@ -1572,7 +1706,7 @@ type GetObjectRequest struct { func (x *GetObjectRequest) Reset() { *x = GetObjectRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[17] + mi := &file_google_storage_v2_storage_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1585,7 +1719,7 @@ func (x *GetObjectRequest) String() string { func (*GetObjectRequest) ProtoMessage() {} func (x *GetObjectRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[17] + mi := &file_google_storage_v2_storage_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1598,7 +1732,7 @@ func (x *GetObjectRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetObjectRequest.ProtoReflect.Descriptor instead. func (*GetObjectRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{17} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{18} } func (x *GetObjectRequest) GetBucket() string { @@ -1622,6 +1756,13 @@ func (x *GetObjectRequest) GetGeneration() int64 { return 0 } +func (x *GetObjectRequest) GetSoftDeleted() bool { + if x != nil && x.SoftDeleted != nil { + return *x.SoftDeleted + } + return false +} + func (x *GetObjectRequest) GetIfGenerationMatch() int64 { if x != nil && x.IfGenerationMatch != nil { return *x.IfGenerationMatch @@ -1675,9 +1816,9 @@ type ReadObjectResponse struct { // client that the request is still live while it is running an operation to // generate more data. ChecksummedData *ChecksummedData `protobuf:"bytes,1,opt,name=checksummed_data,json=checksummedData,proto3" json:"checksummed_data,omitempty"` - // The checksums of the complete object. The client should compute one of - // these checksums over the downloaded object and compare it against the value - // provided here. + // The checksums of the complete object. If the object is downloaded in full, + // the client should compute one of these checksums over the downloaded object + // and compare it against the value provided here. 
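The ReadObjectResponse comment above asks clients that download an object in full to recompute one of the advertised checksums and compare. A hedged verification helper for the crc32c case follows; GCS's crc32c uses the Castagnoli polynomial, and the expected value is assumed to have been pulled out of ObjectChecksums beforehand.

package storage

import (
    "fmt"
    "hash/crc32"
)

var castagnoli = crc32.MakeTable(crc32.Castagnoli)

// verifyCRC32C recomputes the CRC32C of the fully downloaded bytes and compares
// it against the checksum advertised by the server.
func verifyCRC32C(data []byte, want uint32) error {
    if got := crc32.Checksum(data, castagnoli); got != want {
        return fmt.Errorf("crc32c mismatch: got %d, want %d", got, want)
    }
    return nil
}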
ObjectChecksums *ObjectChecksums `protobuf:"bytes,2,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"` // If read_offset and or read_limit was specified on the // ReadObjectRequest, ContentRange will be populated on the first @@ -1691,7 +1832,7 @@ type ReadObjectResponse struct { func (x *ReadObjectResponse) Reset() { *x = ReadObjectResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[18] + mi := &file_google_storage_v2_storage_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1704,7 +1845,7 @@ func (x *ReadObjectResponse) String() string { func (*ReadObjectResponse) ProtoMessage() {} func (x *ReadObjectResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[18] + mi := &file_google_storage_v2_storage_proto_msgTypes[19] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1717,7 +1858,7 @@ func (x *ReadObjectResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ReadObjectResponse.ProtoReflect.Descriptor instead. func (*ReadObjectResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{18} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{19} } func (x *ReadObjectResponse) GetChecksummedData() *ChecksummedData { @@ -1782,15 +1923,13 @@ type WriteObjectSpec struct { // This situation is considered a client error, and if such an error occurs // you must start the upload over from scratch, this time sending the correct // number of bytes. - // - // The `object_size` value is ignored for one-shot (non-resumable) writes. ObjectSize *int64 `protobuf:"varint,8,opt,name=object_size,json=objectSize,proto3,oneof" json:"object_size,omitempty"` } func (x *WriteObjectSpec) Reset() { *x = WriteObjectSpec{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[19] + mi := &file_google_storage_v2_storage_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1803,7 +1942,7 @@ func (x *WriteObjectSpec) String() string { func (*WriteObjectSpec) ProtoMessage() {} func (x *WriteObjectSpec) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[19] + mi := &file_google_storage_v2_storage_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1816,7 +1955,7 @@ func (x *WriteObjectSpec) ProtoReflect() protoreflect.Message { // Deprecated: Use WriteObjectSpec.ProtoReflect.Descriptor instead. func (*WriteObjectSpec) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{19} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{20} } func (x *WriteObjectSpec) GetResource() *Object { @@ -1877,7 +2016,6 @@ type WriteObjectRequest struct { // The first message of each stream should set one of the following. // // Types that are assignable to FirstMessage: - // // *WriteObjectRequest_UploadId // *WriteObjectRequest_WriteObjectSpec FirstMessage isWriteObjectRequest_FirstMessage `protobuf_oneof:"first_message"` @@ -1898,11 +2036,10 @@ type WriteObjectRequest struct { // A portion of the data for the object. 
// // Types that are assignable to Data: - // // *WriteObjectRequest_ChecksummedData Data isWriteObjectRequest_Data `protobuf_oneof:"data"` // Checksums for the complete object. If the checksums computed by the service - // don't match the specifified checksums the call will fail. May only be + // don't match the specified checksums the call will fail. May only be // provided in the first or last request (either with first_message, or // finish_write set). ObjectChecksums *ObjectChecksums `protobuf:"bytes,6,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"` @@ -1920,7 +2057,7 @@ type WriteObjectRequest struct { func (x *WriteObjectRequest) Reset() { *x = WriteObjectRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[20] + mi := &file_google_storage_v2_storage_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1933,7 +2070,7 @@ func (x *WriteObjectRequest) String() string { func (*WriteObjectRequest) ProtoMessage() {} func (x *WriteObjectRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[20] + mi := &file_google_storage_v2_storage_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1946,7 +2083,7 @@ func (x *WriteObjectRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use WriteObjectRequest.ProtoReflect.Descriptor instead. func (*WriteObjectRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{20} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{21} } func (m *WriteObjectRequest) GetFirstMessage() isWriteObjectRequest_FirstMessage { @@ -2053,7 +2190,6 @@ type WriteObjectResponse struct { // The response will set one of the following. // // Types that are assignable to WriteStatus: - // // *WriteObjectResponse_PersistedSize // *WriteObjectResponse_Resource WriteStatus isWriteObjectResponse_WriteStatus `protobuf_oneof:"write_status"` @@ -2062,7 +2198,7 @@ type WriteObjectResponse struct { func (x *WriteObjectResponse) Reset() { *x = WriteObjectResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[21] + mi := &file_google_storage_v2_storage_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2075,7 +2211,7 @@ func (x *WriteObjectResponse) String() string { func (*WriteObjectResponse) ProtoMessage() {} func (x *WriteObjectResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[21] + mi := &file_google_storage_v2_storage_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2088,7 +2224,7 @@ func (x *WriteObjectResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use WriteObjectResponse.ProtoReflect.Descriptor instead. 
func (*WriteObjectResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{21} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{22} } func (m *WriteObjectResponse) GetWriteStatus() isWriteObjectResponse_WriteStatus { @@ -2132,74 +2268,83 @@ func (*WriteObjectResponse_PersistedSize) isWriteObjectResponse_WriteStatus() {} func (*WriteObjectResponse_Resource) isWriteObjectResponse_WriteStatus() {} -// Request message for ListObjects. -type ListObjectsRequest struct { +// Request message for BidiWriteObject. +type BidiWriteObjectRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. Name of the bucket in which to look for objects. - Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` - // Maximum number of `items` plus `prefixes` to return - // in a single page of responses. As duplicate `prefixes` are - // omitted, fewer total results may be returned than requested. The service - // will use this parameter or 1,000 items, whichever is smaller. - PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` - // A previously-returned page token representing part of the larger set of - // results to view. - PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` - // If set, returns results in a directory-like mode. `items` will contain - // only objects whose names, aside from the `prefix`, do not - // contain `delimiter`. Objects whose names, aside from the - // `prefix`, contain `delimiter` will have their name, - // truncated after the `delimiter`, returned in - // `prefixes`. Duplicate `prefixes` are omitted. - Delimiter string `protobuf:"bytes,4,opt,name=delimiter,proto3" json:"delimiter,omitempty"` - // If true, objects that end in exactly one instance of `delimiter` - // will have their metadata included in `items` in addition to - // `prefixes`. - IncludeTrailingDelimiter bool `protobuf:"varint,5,opt,name=include_trailing_delimiter,json=includeTrailingDelimiter,proto3" json:"include_trailing_delimiter,omitempty"` - // Filter results to objects whose names begin with this prefix. - Prefix string `protobuf:"bytes,6,opt,name=prefix,proto3" json:"prefix,omitempty"` - // If `true`, lists all versions of an object as distinct results. - // For more information, see - // [Object - // Versioning](https://cloud.google.com/storage/docs/object-versioning). - Versions bool `protobuf:"varint,7,opt,name=versions,proto3" json:"versions,omitempty"` - // Mask specifying which fields to read from each result. - // If no mask is specified, will default to all fields except items.acl and - // items.owner. - // * may be used to mean "all fields". - ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,8,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"` - // Optional. Filter results to objects whose names are lexicographically equal - // to or after lexicographic_start. If lexicographic_end is also set, the - // objects listed have names between lexicographic_start (inclusive) and - // lexicographic_end (exclusive). - LexicographicStart string `protobuf:"bytes,10,opt,name=lexicographic_start,json=lexicographicStart,proto3" json:"lexicographic_start,omitempty"` - // Optional. Filter results to objects whose names are lexicographically - // before lexicographic_end. 
If lexicographic_start is also set, the objects - // listed have names between lexicographic_start (inclusive) and - // lexicographic_end (exclusive). - LexicographicEnd string `protobuf:"bytes,11,opt,name=lexicographic_end,json=lexicographicEnd,proto3" json:"lexicographic_end,omitempty"` + // The first message of each stream should set one of the following. + // + // Types that are assignable to FirstMessage: + // *BidiWriteObjectRequest_UploadId + // *BidiWriteObjectRequest_WriteObjectSpec + FirstMessage isBidiWriteObjectRequest_FirstMessage `protobuf_oneof:"first_message"` + // Required. The offset from the beginning of the object at which the data + // should be written. + // + // In the first `WriteObjectRequest` of a `WriteObject()` action, it + // indicates the initial offset for the `Write()` call. The value **must** be + // equal to the `persisted_size` that a call to `QueryWriteStatus()` would + // return (0 if this is the first write to the object). + // + // On subsequent calls, this value **must** be no larger than the sum of the + // first `write_offset` and the sizes of all `data` chunks sent previously on + // this stream. + // + // An invalid value will cause an error. + WriteOffset int64 `protobuf:"varint,3,opt,name=write_offset,json=writeOffset,proto3" json:"write_offset,omitempty"` + // A portion of the data for the object. + // + // Types that are assignable to Data: + // *BidiWriteObjectRequest_ChecksummedData + Data isBidiWriteObjectRequest_Data `protobuf_oneof:"data"` + // Checksums for the complete object. If the checksums computed by the service + // don't match the specified checksums the call will fail. May only be + // provided in the first or last request (either with first_message, or + // finish_write set). + ObjectChecksums *ObjectChecksums `protobuf:"bytes,6,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"` + // For each BidiWriteObjectRequest where state_lookup is `true` or the client + // closes the stream, the service will send a BidiWriteObjectResponse + // containing the current persisted size. The persisted size sent in responses + // covers all the bytes the server has persisted thus far and can be used to + // decide what data is safe for the client to drop. Note that the object's + // current size reported by the BidiWriteObjectResponse may lag behind the + // number of bytes written by the client. + StateLookup bool `protobuf:"varint,7,opt,name=state_lookup,json=stateLookup,proto3" json:"state_lookup,omitempty"` + // Persists data written on the stream, up to and including the current + // message, to permanent storage. This option should be used sparingly as it + // may reduce performance. Ongoing writes will periodically be persisted on + // the server even when `flush` is not set. + Flush bool `protobuf:"varint,8,opt,name=flush,proto3" json:"flush,omitempty"` + // If `true`, this indicates that the write is complete. Sending any + // `WriteObjectRequest`s subsequent to one in which `finish_write` is `true` + // will cause an error. + // For a non-resumable write (where the upload_id was not set in the first + // message), it is an error not to set this field in the final message of the + // stream. + FinishWrite bool `protobuf:"varint,9,opt,name=finish_write,json=finishWrite,proto3" json:"finish_write,omitempty"` + // A set of parameters common to Storage API requests concerning an object. 
+ CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,10,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"` } -func (x *ListObjectsRequest) Reset() { - *x = ListObjectsRequest{} +func (x *BidiWriteObjectRequest) Reset() { + *x = BidiWriteObjectRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[22] + mi := &file_google_storage_v2_storage_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ListObjectsRequest) String() string { +func (x *BidiWriteObjectRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ListObjectsRequest) ProtoMessage() {} +func (*BidiWriteObjectRequest) ProtoMessage() {} -func (x *ListObjectsRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[22] +func (x *BidiWriteObjectRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[23] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2210,16 +2355,303 @@ func (x *ListObjectsRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ListObjectsRequest.ProtoReflect.Descriptor instead. -func (*ListObjectsRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{22} +// Deprecated: Use BidiWriteObjectRequest.ProtoReflect.Descriptor instead. +func (*BidiWriteObjectRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{23} } -func (x *ListObjectsRequest) GetParent() string { - if x != nil { - return x.Parent +func (m *BidiWriteObjectRequest) GetFirstMessage() isBidiWriteObjectRequest_FirstMessage { + if m != nil { + return m.FirstMessage } - return "" + return nil +} + +func (x *BidiWriteObjectRequest) GetUploadId() string { + if x, ok := x.GetFirstMessage().(*BidiWriteObjectRequest_UploadId); ok { + return x.UploadId + } + return "" +} + +func (x *BidiWriteObjectRequest) GetWriteObjectSpec() *WriteObjectSpec { + if x, ok := x.GetFirstMessage().(*BidiWriteObjectRequest_WriteObjectSpec); ok { + return x.WriteObjectSpec + } + return nil +} + +func (x *BidiWriteObjectRequest) GetWriteOffset() int64 { + if x != nil { + return x.WriteOffset + } + return 0 +} + +func (m *BidiWriteObjectRequest) GetData() isBidiWriteObjectRequest_Data { + if m != nil { + return m.Data + } + return nil +} + +func (x *BidiWriteObjectRequest) GetChecksummedData() *ChecksummedData { + if x, ok := x.GetData().(*BidiWriteObjectRequest_ChecksummedData); ok { + return x.ChecksummedData + } + return nil +} + +func (x *BidiWriteObjectRequest) GetObjectChecksums() *ObjectChecksums { + if x != nil { + return x.ObjectChecksums + } + return nil +} + +func (x *BidiWriteObjectRequest) GetStateLookup() bool { + if x != nil { + return x.StateLookup + } + return false +} + +func (x *BidiWriteObjectRequest) GetFlush() bool { + if x != nil { + return x.Flush + } + return false +} + +func (x *BidiWriteObjectRequest) GetFinishWrite() bool { + if x != nil { + return x.FinishWrite + } + return false +} + +func (x *BidiWriteObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams { + if x != nil { + return x.CommonObjectRequestParams + } + return nil +} + +type isBidiWriteObjectRequest_FirstMessage interface { + 
isBidiWriteObjectRequest_FirstMessage() +} + +type BidiWriteObjectRequest_UploadId struct { + // For resumable uploads. This should be the `upload_id` returned from a + // call to `StartResumableWriteResponse`. + UploadId string `protobuf:"bytes,1,opt,name=upload_id,json=uploadId,proto3,oneof"` +} + +type BidiWriteObjectRequest_WriteObjectSpec struct { + // For non-resumable uploads. Describes the overall upload, including the + // destination bucket and object name, preconditions, etc. + WriteObjectSpec *WriteObjectSpec `protobuf:"bytes,2,opt,name=write_object_spec,json=writeObjectSpec,proto3,oneof"` +} + +func (*BidiWriteObjectRequest_UploadId) isBidiWriteObjectRequest_FirstMessage() {} + +func (*BidiWriteObjectRequest_WriteObjectSpec) isBidiWriteObjectRequest_FirstMessage() {} + +type isBidiWriteObjectRequest_Data interface { + isBidiWriteObjectRequest_Data() +} + +type BidiWriteObjectRequest_ChecksummedData struct { + // The data to insert. If a crc32c checksum is provided that doesn't match + // the checksum computed by the service, the request will fail. + ChecksummedData *ChecksummedData `protobuf:"bytes,4,opt,name=checksummed_data,json=checksummedData,proto3,oneof"` +} + +func (*BidiWriteObjectRequest_ChecksummedData) isBidiWriteObjectRequest_Data() {} + +// Response message for BidiWriteObject. +type BidiWriteObjectResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The response will set one of the following. + // + // Types that are assignable to WriteStatus: + // *BidiWriteObjectResponse_PersistedSize + // *BidiWriteObjectResponse_Resource + WriteStatus isBidiWriteObjectResponse_WriteStatus `protobuf_oneof:"write_status"` +} + +func (x *BidiWriteObjectResponse) Reset() { + *x = BidiWriteObjectResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BidiWriteObjectResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BidiWriteObjectResponse) ProtoMessage() {} + +func (x *BidiWriteObjectResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BidiWriteObjectResponse.ProtoReflect.Descriptor instead. +func (*BidiWriteObjectResponse) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{24} +} + +func (m *BidiWriteObjectResponse) GetWriteStatus() isBidiWriteObjectResponse_WriteStatus { + if m != nil { + return m.WriteStatus + } + return nil +} + +func (x *BidiWriteObjectResponse) GetPersistedSize() int64 { + if x, ok := x.GetWriteStatus().(*BidiWriteObjectResponse_PersistedSize); ok { + return x.PersistedSize + } + return 0 +} + +func (x *BidiWriteObjectResponse) GetResource() *Object { + if x, ok := x.GetWriteStatus().(*BidiWriteObjectResponse_Resource); ok { + return x.Resource + } + return nil +} + +type isBidiWriteObjectResponse_WriteStatus interface { + isBidiWriteObjectResponse_WriteStatus() +} + +type BidiWriteObjectResponse_PersistedSize struct { + // The total number of bytes that have been processed for the given object + // from all `WriteObject` calls. Only set if the upload has not finalized. 
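Taken together, the BidiWriteObjectRequest/Response messages above describe the new bidirectional upload protocol: the first message selects an upload via upload_id or write_object_spec, each data chunk carries an explicit write_offset, state_lookup and flush let the client ask what has been persisted, and finish_write finalizes the object. A hedged sketch of driving such a stream follows; it assumes the stream was opened with the BidiWriteObject client method added earlier in this diff and that the server answers a finished upload with a single response carrying the resource.

package storage

import (
    "cloud.google.com/go/storage/internal/apiv2/storagepb"
)

// uploadOneShot writes a single in-memory buffer over an already-opened
// BidiWriteObject stream and returns the finalized object metadata.
func uploadOneShot(stream storagepb.Storage_BidiWriteObjectClient, spec *storagepb.WriteObjectSpec, data []byte) (*storagepb.Object, error) {
    // First (and only) message: destination spec, the data at offset 0, and finish_write.
    err := stream.Send(&storagepb.BidiWriteObjectRequest{
        FirstMessage: &storagepb.BidiWriteObjectRequest_WriteObjectSpec{WriteObjectSpec: spec},
        WriteOffset:  0,
        Data: &storagepb.BidiWriteObjectRequest_ChecksummedData{
            ChecksummedData: &storagepb.ChecksummedData{Content: data},
        },
        FinishWrite: true,
    })
    if err != nil {
        return nil, err
    }
    if err := stream.CloseSend(); err != nil {
        return nil, err
    }
    resp, err := stream.Recv()
    if err != nil {
        return nil, err
    }
    // With finish_write set, the response's write_status holds the object resource.
    return resp.GetResource(), nil
}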
+ PersistedSize int64 `protobuf:"varint,1,opt,name=persisted_size,json=persistedSize,proto3,oneof"` +} + +type BidiWriteObjectResponse_Resource struct { + // A resource containing the metadata for the uploaded object. Only set if + // the upload has finalized. + Resource *Object `protobuf:"bytes,2,opt,name=resource,proto3,oneof"` +} + +func (*BidiWriteObjectResponse_PersistedSize) isBidiWriteObjectResponse_WriteStatus() {} + +func (*BidiWriteObjectResponse_Resource) isBidiWriteObjectResponse_WriteStatus() {} + +// Request message for ListObjects. +type ListObjectsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Name of the bucket in which to look for objects. + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Maximum number of `items` plus `prefixes` to return + // in a single page of responses. As duplicate `prefixes` are + // omitted, fewer total results may be returned than requested. The service + // will use this parameter or 1,000 items, whichever is smaller. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // A previously-returned page token representing part of the larger set of + // results to view. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + // If set, returns results in a directory-like mode. `items` will contain + // only objects whose names, aside from the `prefix`, do not + // contain `delimiter`. Objects whose names, aside from the + // `prefix`, contain `delimiter` will have their name, + // truncated after the `delimiter`, returned in + // `prefixes`. Duplicate `prefixes` are omitted. + Delimiter string `protobuf:"bytes,4,opt,name=delimiter,proto3" json:"delimiter,omitempty"` + // If true, objects that end in exactly one instance of `delimiter` + // will have their metadata included in `items` in addition to + // `prefixes`. + IncludeTrailingDelimiter bool `protobuf:"varint,5,opt,name=include_trailing_delimiter,json=includeTrailingDelimiter,proto3" json:"include_trailing_delimiter,omitempty"` + // Filter results to objects whose names begin with this prefix. + Prefix string `protobuf:"bytes,6,opt,name=prefix,proto3" json:"prefix,omitempty"` + // If `true`, lists all versions of an object as distinct results. + // For more information, see + // [Object + // Versioning](https://cloud.google.com/storage/docs/object-versioning). + Versions bool `protobuf:"varint,7,opt,name=versions,proto3" json:"versions,omitempty"` + // Mask specifying which fields to read from each result. + // If no mask is specified, will default to all fields except items.acl and + // items.owner. + // * may be used to mean "all fields". + ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,8,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"` + // Optional. Filter results to objects whose names are lexicographically equal + // to or after lexicographic_start. If lexicographic_end is also set, the + // objects listed have names between lexicographic_start (inclusive) and + // lexicographic_end (exclusive). + LexicographicStart string `protobuf:"bytes,10,opt,name=lexicographic_start,json=lexicographicStart,proto3" json:"lexicographic_start,omitempty"` + // Optional. Filter results to objects whose names are lexicographically + // before lexicographic_end. 
If lexicographic_start is also set, the objects + // listed have names between lexicographic_start (inclusive) and + // lexicographic_end (exclusive). + LexicographicEnd string `protobuf:"bytes,11,opt,name=lexicographic_end,json=lexicographicEnd,proto3" json:"lexicographic_end,omitempty"` + // Optional. If true, only list all soft-deleted versions of the object. + // Soft delete policy is required to set this option. + SoftDeleted bool `protobuf:"varint,12,opt,name=soft_deleted,json=softDeleted,proto3" json:"soft_deleted,omitempty"` + // Optional. Filter results to objects and prefixes that match this glob + // pattern. See [List Objects Using + // Glob](https://cloud.google.com/storage/docs/json_api/v1/objects/list#list-objects-and-prefixes-using-glob) + // for the full syntax. + MatchGlob string `protobuf:"bytes,14,opt,name=match_glob,json=matchGlob,proto3" json:"match_glob,omitempty"` +} + +func (x *ListObjectsRequest) Reset() { + *x = ListObjectsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListObjectsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListObjectsRequest) ProtoMessage() {} + +func (x *ListObjectsRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListObjectsRequest.ProtoReflect.Descriptor instead. +func (*ListObjectsRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{25} +} + +func (x *ListObjectsRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" } func (x *ListObjectsRequest) GetPageSize() int32 { @@ -2285,6 +2717,20 @@ func (x *ListObjectsRequest) GetLexicographicEnd() string { return "" } +func (x *ListObjectsRequest) GetSoftDeleted() bool { + if x != nil { + return x.SoftDeleted + } + return false +} + +func (x *ListObjectsRequest) GetMatchGlob() string { + if x != nil { + return x.MatchGlob + } + return "" +} + // Request object for `QueryWriteStatus`. type QueryWriteStatusRequest struct { state protoimpl.MessageState @@ -2301,7 +2747,7 @@ type QueryWriteStatusRequest struct { func (x *QueryWriteStatusRequest) Reset() { *x = QueryWriteStatusRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[23] + mi := &file_google_storage_v2_storage_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2314,7 +2760,7 @@ func (x *QueryWriteStatusRequest) String() string { func (*QueryWriteStatusRequest) ProtoMessage() {} func (x *QueryWriteStatusRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[23] + mi := &file_google_storage_v2_storage_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2327,7 +2773,7 @@ func (x *QueryWriteStatusRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use QueryWriteStatusRequest.ProtoReflect.Descriptor instead. 
func (*QueryWriteStatusRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{23} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{26} } func (x *QueryWriteStatusRequest) GetUploadId() string { @@ -2353,7 +2799,6 @@ type QueryWriteStatusResponse struct { // The response will set one of the following. // // Types that are assignable to WriteStatus: - // // *QueryWriteStatusResponse_PersistedSize // *QueryWriteStatusResponse_Resource WriteStatus isQueryWriteStatusResponse_WriteStatus `protobuf_oneof:"write_status"` @@ -2362,7 +2807,7 @@ type QueryWriteStatusResponse struct { func (x *QueryWriteStatusResponse) Reset() { *x = QueryWriteStatusResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[24] + mi := &file_google_storage_v2_storage_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2375,7 +2820,7 @@ func (x *QueryWriteStatusResponse) String() string { func (*QueryWriteStatusResponse) ProtoMessage() {} func (x *QueryWriteStatusResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[24] + mi := &file_google_storage_v2_storage_proto_msgTypes[27] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2388,7 +2833,7 @@ func (x *QueryWriteStatusResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use QueryWriteStatusResponse.ProtoReflect.Descriptor instead. func (*QueryWriteStatusResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{24} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{27} } func (m *QueryWriteStatusResponse) GetWriteStatus() isQueryWriteStatusResponse_WriteStatus { @@ -2546,7 +2991,7 @@ type RewriteObjectRequest struct { func (x *RewriteObjectRequest) Reset() { *x = RewriteObjectRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[25] + mi := &file_google_storage_v2_storage_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2559,7 +3004,7 @@ func (x *RewriteObjectRequest) String() string { func (*RewriteObjectRequest) ProtoMessage() {} func (x *RewriteObjectRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[25] + mi := &file_google_storage_v2_storage_proto_msgTypes[28] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2572,7 +3017,7 @@ func (x *RewriteObjectRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RewriteObjectRequest.ProtoReflect.Descriptor instead. 
func (*RewriteObjectRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{25} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{28} } func (x *RewriteObjectRequest) GetDestinationName() string { @@ -2762,7 +3207,7 @@ type RewriteResponse struct { func (x *RewriteResponse) Reset() { *x = RewriteResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[26] + mi := &file_google_storage_v2_storage_proto_msgTypes[29] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2775,7 +3220,7 @@ func (x *RewriteResponse) String() string { func (*RewriteResponse) ProtoMessage() {} func (x *RewriteResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[26] + mi := &file_google_storage_v2_storage_proto_msgTypes[29] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2788,7 +3233,7 @@ func (x *RewriteResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use RewriteResponse.ProtoReflect.Descriptor instead. func (*RewriteResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{26} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{29} } func (x *RewriteResponse) GetTotalBytesRewritten() int64 { @@ -2847,7 +3292,7 @@ type StartResumableWriteRequest struct { func (x *StartResumableWriteRequest) Reset() { *x = StartResumableWriteRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[27] + mi := &file_google_storage_v2_storage_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2860,7 +3305,7 @@ func (x *StartResumableWriteRequest) String() string { func (*StartResumableWriteRequest) ProtoMessage() {} func (x *StartResumableWriteRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[27] + mi := &file_google_storage_v2_storage_proto_msgTypes[30] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2873,7 +3318,7 @@ func (x *StartResumableWriteRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use StartResumableWriteRequest.ProtoReflect.Descriptor instead. 
func (*StartResumableWriteRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{27} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30} } func (x *StartResumableWriteRequest) GetWriteObjectSpec() *WriteObjectSpec { @@ -2911,7 +3356,7 @@ type StartResumableWriteResponse struct { func (x *StartResumableWriteResponse) Reset() { *x = StartResumableWriteResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[28] + mi := &file_google_storage_v2_storage_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2924,7 +3369,7 @@ func (x *StartResumableWriteResponse) String() string { func (*StartResumableWriteResponse) ProtoMessage() {} func (x *StartResumableWriteResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[28] + mi := &file_google_storage_v2_storage_proto_msgTypes[31] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2937,7 +3382,7 @@ func (x *StartResumableWriteResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use StartResumableWriteResponse.ProtoReflect.Descriptor instead. func (*StartResumableWriteResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{28} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{31} } func (x *StartResumableWriteResponse) GetUploadId() string { @@ -2986,8 +3431,6 @@ type UpdateObjectRequest struct { // may accidentally reset the new field's value. // // Not specifying any fields is an error. - // Not specifying a field while setting that field to a non-default value is - // an error. UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,7,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` // A set of parameters common to Storage API requests concerning an object. CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,8,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"` @@ -2996,7 +3439,7 @@ type UpdateObjectRequest struct { func (x *UpdateObjectRequest) Reset() { *x = UpdateObjectRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[29] + mi := &file_google_storage_v2_storage_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3009,7 +3452,7 @@ func (x *UpdateObjectRequest) String() string { func (*UpdateObjectRequest) ProtoMessage() {} func (x *UpdateObjectRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[29] + mi := &file_google_storage_v2_storage_proto_msgTypes[32] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3022,7 +3465,7 @@ func (x *UpdateObjectRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use UpdateObjectRequest.ProtoReflect.Descriptor instead. 
func (*UpdateObjectRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{29} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{32} } func (x *UpdateObjectRequest) GetObject() *Object { @@ -3087,15 +3530,15 @@ type GetServiceAccountRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. Project ID, in the format of "projects/". - // can be the project ID or project number. + // Required. Project ID, in the format of "projects/{projectIdentifier}". + // {projectIdentifier} can be the project ID or project number. Project string `protobuf:"bytes,1,opt,name=project,proto3" json:"project,omitempty"` } func (x *GetServiceAccountRequest) Reset() { *x = GetServiceAccountRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[30] + mi := &file_google_storage_v2_storage_proto_msgTypes[33] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3108,7 +3551,7 @@ func (x *GetServiceAccountRequest) String() string { func (*GetServiceAccountRequest) ProtoMessage() {} func (x *GetServiceAccountRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[30] + mi := &file_google_storage_v2_storage_proto_msgTypes[33] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3121,7 +3564,7 @@ func (x *GetServiceAccountRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetServiceAccountRequest.ProtoReflect.Descriptor instead. func (*GetServiceAccountRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{33} } func (x *GetServiceAccountRequest) GetProject() string { @@ -3138,7 +3581,7 @@ type CreateHmacKeyRequest struct { unknownFields protoimpl.UnknownFields // Required. The project that the HMAC-owning service account lives in, in the - // format of "projects/". can be the + // format of "projects/{projectIdentifier}". {projectIdentifier} can be the // project ID or project number. Project string `protobuf:"bytes,1,opt,name=project,proto3" json:"project,omitempty"` // Required. The service account to create the HMAC for. @@ -3148,7 +3591,7 @@ type CreateHmacKeyRequest struct { func (x *CreateHmacKeyRequest) Reset() { *x = CreateHmacKeyRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[31] + mi := &file_google_storage_v2_storage_proto_msgTypes[34] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3161,7 +3604,7 @@ func (x *CreateHmacKeyRequest) String() string { func (*CreateHmacKeyRequest) ProtoMessage() {} func (x *CreateHmacKeyRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[31] + mi := &file_google_storage_v2_storage_proto_msgTypes[34] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3174,7 +3617,7 @@ func (x *CreateHmacKeyRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use CreateHmacKeyRequest.ProtoReflect.Descriptor instead. 
func (*CreateHmacKeyRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{31} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{34} } func (x *CreateHmacKeyRequest) GetProject() string { @@ -3207,7 +3650,7 @@ type CreateHmacKeyResponse struct { func (x *CreateHmacKeyResponse) Reset() { *x = CreateHmacKeyResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[32] + mi := &file_google_storage_v2_storage_proto_msgTypes[35] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3220,7 +3663,7 @@ func (x *CreateHmacKeyResponse) String() string { func (*CreateHmacKeyResponse) ProtoMessage() {} func (x *CreateHmacKeyResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[32] + mi := &file_google_storage_v2_storage_proto_msgTypes[35] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3233,7 +3676,7 @@ func (x *CreateHmacKeyResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use CreateHmacKeyResponse.ProtoReflect.Descriptor instead. func (*CreateHmacKeyResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{32} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{35} } func (x *CreateHmacKeyResponse) GetMetadata() *HmacKeyMetadata { @@ -3259,15 +3702,15 @@ type DeleteHmacKeyRequest struct { // Required. The identifying key for the HMAC to delete. AccessId string `protobuf:"bytes,1,opt,name=access_id,json=accessId,proto3" json:"access_id,omitempty"` // Required. The project that owns the HMAC key, in the format of - // "projects/". - // can be the project ID or project number. + // "projects/{projectIdentifier}". + // {projectIdentifier} can be the project ID or project number. Project string `protobuf:"bytes,2,opt,name=project,proto3" json:"project,omitempty"` } func (x *DeleteHmacKeyRequest) Reset() { *x = DeleteHmacKeyRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[33] + mi := &file_google_storage_v2_storage_proto_msgTypes[36] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3280,7 +3723,7 @@ func (x *DeleteHmacKeyRequest) String() string { func (*DeleteHmacKeyRequest) ProtoMessage() {} func (x *DeleteHmacKeyRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[33] + mi := &file_google_storage_v2_storage_proto_msgTypes[36] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3293,7 +3736,7 @@ func (x *DeleteHmacKeyRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use DeleteHmacKeyRequest.ProtoReflect.Descriptor instead. func (*DeleteHmacKeyRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{33} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{36} } func (x *DeleteHmacKeyRequest) GetAccessId() string { @@ -3319,15 +3762,15 @@ type GetHmacKeyRequest struct { // Required. The identifying key for the HMAC to delete. AccessId string `protobuf:"bytes,1,opt,name=access_id,json=accessId,proto3" json:"access_id,omitempty"` // Required. The project the HMAC key lies in, in the format of - // "projects/". - // can be the project ID or project number. 
+ // "projects/{projectIdentifier}". + // {projectIdentifier} can be the project ID or project number. Project string `protobuf:"bytes,2,opt,name=project,proto3" json:"project,omitempty"` } func (x *GetHmacKeyRequest) Reset() { *x = GetHmacKeyRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[34] + mi := &file_google_storage_v2_storage_proto_msgTypes[37] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3340,7 +3783,7 @@ func (x *GetHmacKeyRequest) String() string { func (*GetHmacKeyRequest) ProtoMessage() {} func (x *GetHmacKeyRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[34] + mi := &file_google_storage_v2_storage_proto_msgTypes[37] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3353,7 +3796,7 @@ func (x *GetHmacKeyRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetHmacKeyRequest.ProtoReflect.Descriptor instead. func (*GetHmacKeyRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{34} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{37} } func (x *GetHmacKeyRequest) GetAccessId() string { @@ -3377,8 +3820,8 @@ type ListHmacKeysRequest struct { unknownFields protoimpl.UnknownFields // Required. The project to list HMAC keys for, in the format of - // "projects/". - // can be the project ID or project number. + // "projects/{projectIdentifier}". + // {projectIdentifier} can be the project ID or project number. Project string `protobuf:"bytes,1,opt,name=project,proto3" json:"project,omitempty"` // The maximum number of keys to return. PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` @@ -3393,7 +3836,7 @@ type ListHmacKeysRequest struct { func (x *ListHmacKeysRequest) Reset() { *x = ListHmacKeysRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[35] + mi := &file_google_storage_v2_storage_proto_msgTypes[38] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3406,7 +3849,7 @@ func (x *ListHmacKeysRequest) String() string { func (*ListHmacKeysRequest) ProtoMessage() {} func (x *ListHmacKeysRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[35] + mi := &file_google_storage_v2_storage_proto_msgTypes[38] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3419,7 +3862,7 @@ func (x *ListHmacKeysRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ListHmacKeysRequest.ProtoReflect.Descriptor instead. 
func (*ListHmacKeysRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{35} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{38} } func (x *ListHmacKeysRequest) GetProject() string { @@ -3473,7 +3916,7 @@ type ListHmacKeysResponse struct { func (x *ListHmacKeysResponse) Reset() { *x = ListHmacKeysResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[36] + mi := &file_google_storage_v2_storage_proto_msgTypes[39] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3486,7 +3929,7 @@ func (x *ListHmacKeysResponse) String() string { func (*ListHmacKeysResponse) ProtoMessage() {} func (x *ListHmacKeysResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[36] + mi := &file_google_storage_v2_storage_proto_msgTypes[39] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3499,7 +3942,7 @@ func (x *ListHmacKeysResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ListHmacKeysResponse.ProtoReflect.Descriptor instead. func (*ListHmacKeysResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{36} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{39} } func (x *ListHmacKeysResponse) GetHmacKeys() []*HmacKeyMetadata { @@ -3539,7 +3982,7 @@ type UpdateHmacKeyRequest struct { func (x *UpdateHmacKeyRequest) Reset() { *x = UpdateHmacKeyRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[37] + mi := &file_google_storage_v2_storage_proto_msgTypes[40] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3552,7 +3995,7 @@ func (x *UpdateHmacKeyRequest) String() string { func (*UpdateHmacKeyRequest) ProtoMessage() {} func (x *UpdateHmacKeyRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[37] + mi := &file_google_storage_v2_storage_proto_msgTypes[40] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3565,7 +4008,7 @@ func (x *UpdateHmacKeyRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use UpdateHmacKeyRequest.ProtoReflect.Descriptor instead. 
func (*UpdateHmacKeyRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{37} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40} } func (x *UpdateHmacKeyRequest) GetHmacKey() *HmacKeyMetadata { @@ -3602,7 +4045,7 @@ type CommonObjectRequestParams struct { func (x *CommonObjectRequestParams) Reset() { *x = CommonObjectRequestParams{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[38] + mi := &file_google_storage_v2_storage_proto_msgTypes[41] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3615,7 +4058,7 @@ func (x *CommonObjectRequestParams) String() string { func (*CommonObjectRequestParams) ProtoMessage() {} func (x *CommonObjectRequestParams) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[38] + mi := &file_google_storage_v2_storage_proto_msgTypes[41] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3628,7 +4071,7 @@ func (x *CommonObjectRequestParams) ProtoReflect() protoreflect.Message { // Deprecated: Use CommonObjectRequestParams.ProtoReflect.Descriptor instead. func (*CommonObjectRequestParams) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{38} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{41} } func (x *CommonObjectRequestParams) GetEncryptionAlgorithm() string { @@ -3662,7 +4105,7 @@ type ServiceConstants struct { func (x *ServiceConstants) Reset() { *x = ServiceConstants{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[39] + mi := &file_google_storage_v2_storage_proto_msgTypes[42] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3675,7 +4118,7 @@ func (x *ServiceConstants) String() string { func (*ServiceConstants) ProtoMessage() {} func (x *ServiceConstants) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[39] + mi := &file_google_storage_v2_storage_proto_msgTypes[42] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3688,7 +4131,7 @@ func (x *ServiceConstants) ProtoReflect() protoreflect.Message { // Deprecated: Use ServiceConstants.ProtoReflect.Descriptor instead. func (*ServiceConstants) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{39} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{42} } // A bucket. @@ -3698,6 +4141,7 @@ type Bucket struct { unknownFields protoimpl.UnknownFields // Immutable. The name of the bucket. + // Format: `projects/{project}/buckets/{bucket}` Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Output only. The user-chosen part of the bucket name. The `{bucket}` // portion of the `name` field. For globally unique buckets, this is equal to @@ -3708,12 +4152,10 @@ type Bucket struct { // only be performed if the etag matches that of the bucket. Etag string `protobuf:"bytes,29,opt,name=etag,proto3" json:"etag,omitempty"` // Immutable. The project which owns this bucket, in the format of - // "projects/". - // can be the project ID or project number. + // "projects/{projectIdentifier}". + // {projectIdentifier} can be the project ID or project number. 
Project string `protobuf:"bytes,3,opt,name=project,proto3" json:"project,omitempty"` // Output only. The metadata generation of this bucket. - // Attempting to set or update this field will result in a - // [FieldViolation][google.rpc.BadRequest.FieldViolation]. Metageneration int64 `protobuf:"varint,4,opt,name=metageneration,proto3" json:"metageneration,omitempty"` // Immutable. The location of the bucket. Object data for objects in the // bucket resides in physical storage within this region. Defaults to `US`. @@ -3737,7 +4179,7 @@ type Bucket struct { // replication. "ASYNC_TURBO" enables turbo replication, valid for dual-region // buckets only. If rpo is not specified when the bucket is created, it // defaults to "DEFAULT". For more information, see - // https://cloud.google.com/storage/docs/turbo-replication. + // https://cloud.google.com/storage/docs/availability-durability#turbo-replication. Rpo string `protobuf:"bytes,27,opt,name=rpo,proto3" json:"rpo,omitempty"` // Access controls on the bucket. // If iam_config.uniform_bucket_level_access is enabled on this bucket, @@ -3752,15 +4194,11 @@ type Bucket struct { // for more information. Lifecycle *Bucket_Lifecycle `protobuf:"bytes,10,opt,name=lifecycle,proto3" json:"lifecycle,omitempty"` // Output only. The creation time of the bucket. - // Attempting to set or update this field will result in a - // [FieldViolation][google.rpc.BadRequest.FieldViolation]. CreateTime *timestamppb.Timestamp `protobuf:"bytes,11,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` // The bucket's [https://www.w3.org/TR/cors/][Cross-Origin Resource Sharing] // (CORS) config. Cors []*Bucket_Cors `protobuf:"bytes,12,rep,name=cors,proto3" json:"cors,omitempty"` // Output only. The modification time of the bucket. - // Attempting to set or update this field will result in a - // [FieldViolation][google.rpc.BadRequest.FieldViolation]. UpdateTime *timestamppb.Timestamp `protobuf:"bytes,13,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"` // The default value for event-based hold on newly created objects in this // bucket. Event-based hold is a way to retain objects indefinitely until an @@ -3815,12 +4253,15 @@ type Bucket struct { // The bucket's Autoclass configuration. If there is no configuration, the // Autoclass feature will be disabled and have no effect on the bucket. Autoclass *Bucket_Autoclass `protobuf:"bytes,28,opt,name=autoclass,proto3" json:"autoclass,omitempty"` + // Optional. The bucket's soft delete policy. The soft delete policy prevents + // soft-deleted objects from being permanently deleted. 
+ SoftDeletePolicy *Bucket_SoftDeletePolicy `protobuf:"bytes,31,opt,name=soft_delete_policy,json=softDeletePolicy,proto3" json:"soft_delete_policy,omitempty"` } func (x *Bucket) Reset() { *x = Bucket{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[40] + mi := &file_google_storage_v2_storage_proto_msgTypes[43] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3833,7 +4274,7 @@ func (x *Bucket) String() string { func (*Bucket) ProtoMessage() {} func (x *Bucket) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[40] + mi := &file_google_storage_v2_storage_proto_msgTypes[43] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3846,7 +4287,7 @@ func (x *Bucket) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket.ProtoReflect.Descriptor instead. func (*Bucket) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43} } func (x *Bucket) GetName() string { @@ -4045,6 +4486,13 @@ func (x *Bucket) GetAutoclass() *Bucket_Autoclass { return nil } +func (x *Bucket) GetSoftDeletePolicy() *Bucket_SoftDeletePolicy { + if x != nil { + return x.SoftDeletePolicy + } + return nil +} + // An access-control entry. type BucketAccessControl struct { state protoimpl.MessageState @@ -4095,7 +4543,7 @@ type BucketAccessControl struct { func (x *BucketAccessControl) Reset() { *x = BucketAccessControl{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[41] + mi := &file_google_storage_v2_storage_proto_msgTypes[44] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4108,7 +4556,7 @@ func (x *BucketAccessControl) String() string { func (*BucketAccessControl) ProtoMessage() {} func (x *BucketAccessControl) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[41] + mi := &file_google_storage_v2_storage_proto_msgTypes[44] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4121,7 +4569,7 @@ func (x *BucketAccessControl) ProtoReflect() protoreflect.Message { // Deprecated: Use BucketAccessControl.ProtoReflect.Descriptor instead. func (*BucketAccessControl) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{41} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{44} } func (x *BucketAccessControl) GetRole() string { @@ -4194,7 +4642,7 @@ type ChecksummedData struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // The data. + // Optional. The data. Content []byte `protobuf:"bytes,1,opt,name=content,proto3" json:"content,omitempty"` // If set, the CRC32C digest of the content field. 
Crc32C *uint32 `protobuf:"fixed32,2,opt,name=crc32c,proto3,oneof" json:"crc32c,omitempty"` @@ -4203,7 +4651,7 @@ type ChecksummedData struct { func (x *ChecksummedData) Reset() { *x = ChecksummedData{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[42] + mi := &file_google_storage_v2_storage_proto_msgTypes[45] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4216,7 +4664,7 @@ func (x *ChecksummedData) String() string { func (*ChecksummedData) ProtoMessage() {} func (x *ChecksummedData) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[42] + mi := &file_google_storage_v2_storage_proto_msgTypes[45] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4229,7 +4677,7 @@ func (x *ChecksummedData) ProtoReflect() protoreflect.Message { // Deprecated: Use ChecksummedData.ProtoReflect.Descriptor instead. func (*ChecksummedData) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{42} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{45} } func (x *ChecksummedData) GetContent() []byte { @@ -4254,7 +4702,7 @@ type ObjectChecksums struct { // CRC32C digest of the object data. Computed by the Cloud Storage service for // all written objects. - // If set in an WriteObjectRequest, service will validate that the stored + // If set in a WriteObjectRequest, service will validate that the stored // object matches this checksum. Crc32C *uint32 `protobuf:"fixed32,1,opt,name=crc32c,proto3,oneof" json:"crc32c,omitempty"` // 128 bit MD5 hash of the object data. @@ -4270,7 +4718,7 @@ type ObjectChecksums struct { func (x *ObjectChecksums) Reset() { *x = ObjectChecksums{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[43] + mi := &file_google_storage_v2_storage_proto_msgTypes[46] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4283,7 +4731,7 @@ func (x *ObjectChecksums) String() string { func (*ObjectChecksums) ProtoMessage() {} func (x *ObjectChecksums) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[43] + mi := &file_google_storage_v2_storage_proto_msgTypes[46] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4296,7 +4744,7 @@ func (x *ObjectChecksums) ProtoReflect() protoreflect.Message { // Deprecated: Use ObjectChecksums.ProtoReflect.Descriptor instead. func (*ObjectChecksums) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{46} } func (x *ObjectChecksums) GetCrc32C() uint32 { @@ -4320,14 +4768,14 @@ type HmacKeyMetadata struct { unknownFields protoimpl.UnknownFields // Immutable. Resource name ID of the key in the format - // /. - // can be the project ID or project number. + // {projectIdentifier}/{accessId}. + // {projectIdentifier} can be the project ID or project number. Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` // Immutable. Globally unique id for keys. AccessId string `protobuf:"bytes,2,opt,name=access_id,json=accessId,proto3" json:"access_id,omitempty"` // Immutable. Identifies the project that owns the service account of the - // specified HMAC key, in the format "projects/". 
- // can be the project ID or project number. + // specified HMAC key, in the format "projects/{projectIdentifier}". + // {projectIdentifier} can be the project ID or project number. Project string `protobuf:"bytes,3,opt,name=project,proto3" json:"project,omitempty"` // Output only. Email of the service account the key authenticates as. ServiceAccountEmail string `protobuf:"bytes,4,opt,name=service_account_email,json=serviceAccountEmail,proto3" json:"service_account_email,omitempty"` @@ -4345,7 +4793,7 @@ type HmacKeyMetadata struct { func (x *HmacKeyMetadata) Reset() { *x = HmacKeyMetadata{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[44] + mi := &file_google_storage_v2_storage_proto_msgTypes[47] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4358,7 +4806,7 @@ func (x *HmacKeyMetadata) String() string { func (*HmacKeyMetadata) ProtoMessage() {} func (x *HmacKeyMetadata) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[44] + mi := &file_google_storage_v2_storage_proto_msgTypes[47] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4371,7 +4819,7 @@ func (x *HmacKeyMetadata) ProtoReflect() protoreflect.Message { // Deprecated: Use HmacKeyMetadata.ProtoReflect.Descriptor instead. func (*HmacKeyMetadata) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{44} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{47} } func (x *HmacKeyMetadata) GetId() string { @@ -4465,7 +4913,7 @@ type NotificationConfig struct { func (x *NotificationConfig) Reset() { *x = NotificationConfig{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[45] + mi := &file_google_storage_v2_storage_proto_msgTypes[48] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4478,7 +4926,7 @@ func (x *NotificationConfig) String() string { func (*NotificationConfig) ProtoMessage() {} func (x *NotificationConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[45] + mi := &file_google_storage_v2_storage_proto_msgTypes[48] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4491,7 +4939,7 @@ func (x *NotificationConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use NotificationConfig.ProtoReflect.Descriptor instead. 
func (*NotificationConfig) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{45} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{48} } func (x *NotificationConfig) GetName() string { @@ -4560,7 +5008,7 @@ type CustomerEncryption struct { func (x *CustomerEncryption) Reset() { *x = CustomerEncryption{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[46] + mi := &file_google_storage_v2_storage_proto_msgTypes[49] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4573,7 +5021,7 @@ func (x *CustomerEncryption) String() string { func (*CustomerEncryption) ProtoMessage() {} func (x *CustomerEncryption) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[46] + mi := &file_google_storage_v2_storage_proto_msgTypes[49] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4586,7 +5034,7 @@ func (x *CustomerEncryption) ProtoReflect() protoreflect.Message { // Deprecated: Use CustomerEncryption.ProtoReflect.Descriptor instead. func (*CustomerEncryption) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{46} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{49} } func (x *CustomerEncryption) GetEncryptionAlgorithm() string { @@ -4625,21 +5073,17 @@ type Object struct { // object. Etag string `protobuf:"bytes,27,opt,name=etag,proto3" json:"etag,omitempty"` // Immutable. The content generation of this object. Used for object - // versioning. Attempting to set or update this field will result in a - // [FieldViolation][google.rpc.BadRequest.FieldViolation]. + // versioning. Generation int64 `protobuf:"varint,3,opt,name=generation,proto3" json:"generation,omitempty"` // Output only. The version of the metadata for this generation of this // object. Used for preconditions and for detecting changes in metadata. A // metageneration number is only meaningful in the context of a particular - // generation of a particular object. Attempting to set or update this field - // will result in a [FieldViolation][google.rpc.BadRequest.FieldViolation]. + // generation of a particular object. Metageneration int64 `protobuf:"varint,4,opt,name=metageneration,proto3" json:"metageneration,omitempty"` // Storage class of the object. StorageClass string `protobuf:"bytes,5,opt,name=storage_class,json=storageClass,proto3" json:"storage_class,omitempty"` // Output only. Content-Length of the object data in bytes, matching // [https://tools.ietf.org/html/rfc7230#section-3.3.2][RFC 7230 §3.3.2]. - // Attempting to set or update this field will result in a - // [FieldViolation][google.rpc.BadRequest.FieldViolation]. Size int64 `protobuf:"varint,6,opt,name=size,proto3" json:"size,omitempty"` // Content-Encoding of the object data, matching // [https://tools.ietf.org/html/rfc7231#section-3.1.2.2][RFC 7231 §3.1.2.2] @@ -4659,10 +5103,8 @@ type Object struct { // Content-Language of the object data, matching // [https://tools.ietf.org/html/rfc7231#section-3.1.3.2][RFC 7231 §3.1.3.2]. ContentLanguage string `protobuf:"bytes,11,opt,name=content_language,json=contentLanguage,proto3" json:"content_language,omitempty"` - // Output only. The deletion time of the object. Will be returned if and only - // if this version of the object has been deleted. 
Attempting to set or update - // this field will result in a - // [FieldViolation][google.rpc.BadRequest.FieldViolation]. + // Output only. If this object is noncurrent, this is the time when the object + // became noncurrent. DeleteTime *timestamppb.Timestamp `protobuf:"bytes,12,opt,name=delete_time,json=deleteTime,proto3" json:"delete_time,omitempty"` // Content-Type of the object data, matching // [https://tools.ietf.org/html/rfc7231#section-3.1.1.5][RFC 7231 §3.1.1.5]. @@ -4670,13 +5112,9 @@ type Object struct { // `application/octet-stream`. ContentType string `protobuf:"bytes,13,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"` // Output only. The creation time of the object. - // Attempting to set or update this field will result in a - // [FieldViolation][google.rpc.BadRequest.FieldViolation]. CreateTime *timestamppb.Timestamp `protobuf:"bytes,14,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` // Output only. Number of underlying components that make up this object. - // Components are accumulated by compose operations. Attempting to set or - // update this field will result in a - // [FieldViolation][google.rpc.BadRequest.FieldViolation]. + // Components are accumulated by compose operations. ComponentCount int32 `protobuf:"varint,15,opt,name=component_count,json=componentCount,proto3" json:"component_count,omitempty"` // Output only. Hashes for the data part of this object. This field is used // for output only and will be silently ignored if provided in requests. @@ -4687,16 +5125,12 @@ type Object struct { // such as modifying custom metadata, as well as changes made by Cloud Storage // on behalf of a requester, such as changing the storage class based on an // Object Lifecycle Configuration. - // Attempting to set or update this field will result in a - // [FieldViolation][google.rpc.BadRequest.FieldViolation]. UpdateTime *timestamppb.Timestamp `protobuf:"bytes,17,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"` // Cloud KMS Key used to encrypt this object, if the object is encrypted by // such a key. KmsKey string `protobuf:"bytes,18,opt,name=kms_key,json=kmsKey,proto3" json:"kms_key,omitempty"` // Output only. The time at which the object's storage class was last changed. // When the object is initially created, it will be set to time_created. - // Attempting to set or update this field will result in a - // [FieldViolation][google.rpc.BadRequest.FieldViolation]. UpdateStorageClassTime *timestamppb.Timestamp `protobuf:"bytes,19,opt,name=update_storage_class_time,json=updateStorageClassTime,proto3" json:"update_storage_class_time,omitempty"` // Whether an object is under temporary hold. While this flag is set to true, // the object is protected against deletion and overwrites. A common use case @@ -4727,8 +5161,7 @@ type Object struct { // In a response, this field will always be set to true or false. EventBasedHold *bool `protobuf:"varint,23,opt,name=event_based_hold,json=eventBasedHold,proto3,oneof" json:"event_based_hold,omitempty"` // Output only. The owner of the object. This will always be the uploader of - // the object. Attempting to set or update this field will result in a - // [FieldViolation][google.rpc.BadRequest.FieldViolation]. + // the object. Owner *Owner `protobuf:"bytes,24,opt,name=owner,proto3" json:"owner,omitempty"` // Metadata of Customer-Supplied Encryption Key, if the object is encrypted by // such a key. 
@@ -4740,7 +5173,7 @@ type Object struct { func (x *Object) Reset() { *x = Object{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[47] + mi := &file_google_storage_v2_storage_proto_msgTypes[50] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4753,7 +5186,7 @@ func (x *Object) String() string { func (*Object) ProtoMessage() {} func (x *Object) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[47] + mi := &file_google_storage_v2_storage_proto_msgTypes[50] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4766,7 +5199,7 @@ func (x *Object) ProtoReflect() protoreflect.Message { // Deprecated: Use Object.ProtoReflect.Descriptor instead. func (*Object) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{47} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{50} } func (x *Object) GetName() string { @@ -5008,7 +5441,7 @@ type ObjectAccessControl struct { func (x *ObjectAccessControl) Reset() { *x = ObjectAccessControl{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[48] + mi := &file_google_storage_v2_storage_proto_msgTypes[51] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5021,7 +5454,7 @@ func (x *ObjectAccessControl) String() string { func (*ObjectAccessControl) ProtoMessage() {} func (x *ObjectAccessControl) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[48] + mi := &file_google_storage_v2_storage_proto_msgTypes[51] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5034,7 +5467,7 @@ func (x *ObjectAccessControl) ProtoReflect() protoreflect.Message { // Deprecated: Use ObjectAccessControl.ProtoReflect.Descriptor instead. func (*ObjectAccessControl) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{48} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{51} } func (x *ObjectAccessControl) GetRole() string { @@ -5119,7 +5552,7 @@ type ListObjectsResponse struct { func (x *ListObjectsResponse) Reset() { *x = ListObjectsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[49] + mi := &file_google_storage_v2_storage_proto_msgTypes[52] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5132,7 +5565,7 @@ func (x *ListObjectsResponse) String() string { func (*ListObjectsResponse) ProtoMessage() {} func (x *ListObjectsResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[49] + mi := &file_google_storage_v2_storage_proto_msgTypes[52] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5145,7 +5578,7 @@ func (x *ListObjectsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ListObjectsResponse.ProtoReflect.Descriptor instead. 
func (*ListObjectsResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{49} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{52} } func (x *ListObjectsResponse) GetObjects() []*Object { @@ -5184,7 +5617,7 @@ type ProjectTeam struct { func (x *ProjectTeam) Reset() { *x = ProjectTeam{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[50] + mi := &file_google_storage_v2_storage_proto_msgTypes[53] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5197,7 +5630,7 @@ func (x *ProjectTeam) String() string { func (*ProjectTeam) ProtoMessage() {} func (x *ProjectTeam) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[50] + mi := &file_google_storage_v2_storage_proto_msgTypes[53] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5210,7 +5643,7 @@ func (x *ProjectTeam) ProtoReflect() protoreflect.Message { // Deprecated: Use ProjectTeam.ProtoReflect.Descriptor instead. func (*ProjectTeam) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{50} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{53} } func (x *ProjectTeam) GetProjectNumber() string { @@ -5242,7 +5675,7 @@ type ServiceAccount struct { func (x *ServiceAccount) Reset() { *x = ServiceAccount{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[51] + mi := &file_google_storage_v2_storage_proto_msgTypes[54] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5255,7 +5688,7 @@ func (x *ServiceAccount) String() string { func (*ServiceAccount) ProtoMessage() {} func (x *ServiceAccount) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[51] + mi := &file_google_storage_v2_storage_proto_msgTypes[54] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5268,7 +5701,7 @@ func (x *ServiceAccount) ProtoReflect() protoreflect.Message { // Deprecated: Use ServiceAccount.ProtoReflect.Descriptor instead. func (*ServiceAccount) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{51} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{54} } func (x *ServiceAccount) GetEmailAddress() string { @@ -5293,7 +5726,7 @@ type Owner struct { func (x *Owner) Reset() { *x = Owner{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[52] + mi := &file_google_storage_v2_storage_proto_msgTypes[55] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5306,7 +5739,7 @@ func (x *Owner) String() string { func (*Owner) ProtoMessage() {} func (x *Owner) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[52] + mi := &file_google_storage_v2_storage_proto_msgTypes[55] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5319,7 +5752,7 @@ func (x *Owner) ProtoReflect() protoreflect.Message { // Deprecated: Use Owner.ProtoReflect.Descriptor instead. 
func (*Owner) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{52} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{55} } func (x *Owner) GetEntity() string { @@ -5353,7 +5786,7 @@ type ContentRange struct { func (x *ContentRange) Reset() { *x = ContentRange{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[53] + mi := &file_google_storage_v2_storage_proto_msgTypes[56] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5366,7 +5799,7 @@ func (x *ContentRange) String() string { func (*ContentRange) ProtoMessage() {} func (x *ContentRange) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[53] + mi := &file_google_storage_v2_storage_proto_msgTypes[56] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5379,7 +5812,7 @@ func (x *ContentRange) ProtoReflect() protoreflect.Message { // Deprecated: Use ContentRange.ProtoReflect.Descriptor instead. func (*ContentRange) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{53} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{56} } func (x *ContentRange) GetStart() int64 { @@ -5421,7 +5854,7 @@ type ComposeObjectRequest_SourceObject struct { func (x *ComposeObjectRequest_SourceObject) Reset() { *x = ComposeObjectRequest_SourceObject{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[54] + mi := &file_google_storage_v2_storage_proto_msgTypes[57] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5434,7 +5867,7 @@ func (x *ComposeObjectRequest_SourceObject) String() string { func (*ComposeObjectRequest_SourceObject) ProtoMessage() {} func (x *ComposeObjectRequest_SourceObject) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[54] + mi := &file_google_storage_v2_storage_proto_msgTypes[57] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5486,7 +5919,7 @@ type ComposeObjectRequest_SourceObject_ObjectPreconditions struct { func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) Reset() { *x = ComposeObjectRequest_SourceObject_ObjectPreconditions{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[55] + mi := &file_google_storage_v2_storage_proto_msgTypes[58] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5499,7 +5932,7 @@ func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) String() string func (*ComposeObjectRequest_SourceObject_ObjectPreconditions) ProtoMessage() {} func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[55] + mi := &file_google_storage_v2_storage_proto_msgTypes[58] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5535,7 +5968,7 @@ type Bucket_Billing struct { func (x *Bucket_Billing) Reset() { *x = Bucket_Billing{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[56] + mi := &file_google_storage_v2_storage_proto_msgTypes[59] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5548,7 
+5981,7 @@ func (x *Bucket_Billing) String() string { func (*Bucket_Billing) ProtoMessage() {} func (x *Bucket_Billing) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[56] + mi := &file_google_storage_v2_storage_proto_msgTypes[59] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5561,7 +5994,7 @@ func (x *Bucket_Billing) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Billing.ProtoReflect.Descriptor instead. func (*Bucket_Billing) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 0} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 0} } func (x *Bucket_Billing) GetRequesterPays() bool { @@ -5601,7 +6034,7 @@ type Bucket_Cors struct { func (x *Bucket_Cors) Reset() { *x = Bucket_Cors{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[57] + mi := &file_google_storage_v2_storage_proto_msgTypes[60] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5614,7 +6047,7 @@ func (x *Bucket_Cors) String() string { func (*Bucket_Cors) ProtoMessage() {} func (x *Bucket_Cors) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[57] + mi := &file_google_storage_v2_storage_proto_msgTypes[60] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5627,7 +6060,7 @@ func (x *Bucket_Cors) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Cors.ProtoReflect.Descriptor instead. func (*Bucket_Cors) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 1} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 1} } func (x *Bucket_Cors) GetOrigin() []string { @@ -5672,7 +6105,7 @@ type Bucket_Encryption struct { func (x *Bucket_Encryption) Reset() { *x = Bucket_Encryption{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[58] + mi := &file_google_storage_v2_storage_proto_msgTypes[61] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5685,7 +6118,7 @@ func (x *Bucket_Encryption) String() string { func (*Bucket_Encryption) ProtoMessage() {} func (x *Bucket_Encryption) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[58] + mi := &file_google_storage_v2_storage_proto_msgTypes[61] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5698,7 +6131,7 @@ func (x *Bucket_Encryption) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Encryption.ProtoReflect.Descriptor instead. 
func (*Bucket_Encryption) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 2} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 2} } func (x *Bucket_Encryption) GetDefaultKmsKey() string { @@ -5724,7 +6157,7 @@ type Bucket_IamConfig struct { func (x *Bucket_IamConfig) Reset() { *x = Bucket_IamConfig{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[59] + mi := &file_google_storage_v2_storage_proto_msgTypes[62] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5737,7 +6170,7 @@ func (x *Bucket_IamConfig) String() string { func (*Bucket_IamConfig) ProtoMessage() {} func (x *Bucket_IamConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[59] + mi := &file_google_storage_v2_storage_proto_msgTypes[62] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5750,7 +6183,7 @@ func (x *Bucket_IamConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_IamConfig.ProtoReflect.Descriptor instead. func (*Bucket_IamConfig) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 3} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 3} } func (x *Bucket_IamConfig) GetUniformBucketLevelAccess() *Bucket_IamConfig_UniformBucketLevelAccess { @@ -5782,7 +6215,7 @@ type Bucket_Lifecycle struct { func (x *Bucket_Lifecycle) Reset() { *x = Bucket_Lifecycle{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[60] + mi := &file_google_storage_v2_storage_proto_msgTypes[63] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5795,7 +6228,7 @@ func (x *Bucket_Lifecycle) String() string { func (*Bucket_Lifecycle) ProtoMessage() {} func (x *Bucket_Lifecycle) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[60] + mi := &file_google_storage_v2_storage_proto_msgTypes[63] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5808,7 +6241,7 @@ func (x *Bucket_Lifecycle) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Lifecycle.ProtoReflect.Descriptor instead. 
func (*Bucket_Lifecycle) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 4} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 4} } func (x *Bucket_Lifecycle) GetRule() []*Bucket_Lifecycle_Rule { @@ -5834,7 +6267,7 @@ type Bucket_Logging struct { func (x *Bucket_Logging) Reset() { *x = Bucket_Logging{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[61] + mi := &file_google_storage_v2_storage_proto_msgTypes[64] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5847,7 +6280,7 @@ func (x *Bucket_Logging) String() string { func (*Bucket_Logging) ProtoMessage() {} func (x *Bucket_Logging) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[61] + mi := &file_google_storage_v2_storage_proto_msgTypes[64] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5860,7 +6293,7 @@ func (x *Bucket_Logging) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Logging.ProtoReflect.Descriptor instead. func (*Bucket_Logging) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 5} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 5} } func (x *Bucket_Logging) GetLogBucket() string { @@ -5888,11 +6321,6 @@ type Bucket_RetentionPolicy struct { EffectiveTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=effective_time,json=effectiveTime,proto3" json:"effective_time,omitempty"` // Once locked, an object retention policy cannot be modified. IsLocked bool `protobuf:"varint,2,opt,name=is_locked,json=isLocked,proto3" json:"is_locked,omitempty"` - // The duration in seconds that objects need to be retained. Retention - // duration must be greater than zero and less than 100 years. Note that - // enforcement of retention periods less than a day is not guaranteed. Such - // periods should only be used for testing purposes. - RetentionPeriod *int64 `protobuf:"varint,3,opt,name=retention_period,json=retentionPeriod,proto3,oneof" json:"retention_period,omitempty"` // The duration that objects need to be retained. Retention duration must be // greater than zero and less than 100 years. Note that enforcement of // retention periods less than a day is not guaranteed. Such periods should @@ -5904,7 +6332,7 @@ type Bucket_RetentionPolicy struct { func (x *Bucket_RetentionPolicy) Reset() { *x = Bucket_RetentionPolicy{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[62] + mi := &file_google_storage_v2_storage_proto_msgTypes[65] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5917,7 +6345,7 @@ func (x *Bucket_RetentionPolicy) String() string { func (*Bucket_RetentionPolicy) ProtoMessage() {} func (x *Bucket_RetentionPolicy) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[62] + mi := &file_google_storage_v2_storage_proto_msgTypes[65] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5930,7 +6358,7 @@ func (x *Bucket_RetentionPolicy) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_RetentionPolicy.ProtoReflect.Descriptor instead. 
func (*Bucket_RetentionPolicy) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 6} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 6} } func (x *Bucket_RetentionPolicy) GetEffectiveTime() *timestamppb.Timestamp { @@ -5947,20 +6375,73 @@ func (x *Bucket_RetentionPolicy) GetIsLocked() bool { return false } -func (x *Bucket_RetentionPolicy) GetRetentionPeriod() int64 { - if x != nil && x.RetentionPeriod != nil { - return *x.RetentionPeriod +func (x *Bucket_RetentionPolicy) GetRetentionDuration() *durationpb.Duration { + if x != nil { + return x.RetentionDuration } - return 0 + return nil } -func (x *Bucket_RetentionPolicy) GetRetentionDuration() *durationpb.Duration { +// Soft delete policy properties of a bucket. +type Bucket_SoftDeletePolicy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The period of time that soft-deleted objects in the bucket must be + // retained and cannot be permanently deleted. The duration must be greater + // than or equal to 7 days and less than 1 year. + RetentionDuration *durationpb.Duration `protobuf:"bytes,1,opt,name=retention_duration,json=retentionDuration,proto3,oneof" json:"retention_duration,omitempty"` + // Time from which the policy was effective. This is service-provided. + EffectiveTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=effective_time,json=effectiveTime,proto3,oneof" json:"effective_time,omitempty"` +} + +func (x *Bucket_SoftDeletePolicy) Reset() { + *x = Bucket_SoftDeletePolicy{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[66] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bucket_SoftDeletePolicy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bucket_SoftDeletePolicy) ProtoMessage() {} + +func (x *Bucket_SoftDeletePolicy) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[66] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bucket_SoftDeletePolicy.ProtoReflect.Descriptor instead. +func (*Bucket_SoftDeletePolicy) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 7} +} + +func (x *Bucket_SoftDeletePolicy) GetRetentionDuration() *durationpb.Duration { if x != nil { return x.RetentionDuration } return nil } +func (x *Bucket_SoftDeletePolicy) GetEffectiveTime() *timestamppb.Timestamp { + if x != nil { + return x.EffectiveTime + } + return nil +} + // Properties of a bucket related to versioning. // For more on Cloud Storage versioning, see // https://cloud.google.com/storage/docs/object-versioning. 
@@ -5976,7 +6457,7 @@ type Bucket_Versioning struct { func (x *Bucket_Versioning) Reset() { *x = Bucket_Versioning{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[63] + mi := &file_google_storage_v2_storage_proto_msgTypes[67] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5989,7 +6470,7 @@ func (x *Bucket_Versioning) String() string { func (*Bucket_Versioning) ProtoMessage() {} func (x *Bucket_Versioning) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[63] + mi := &file_google_storage_v2_storage_proto_msgTypes[67] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6002,7 +6483,7 @@ func (x *Bucket_Versioning) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Versioning.ProtoReflect.Descriptor instead. func (*Bucket_Versioning) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 7} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 8} } func (x *Bucket_Versioning) GetEnabled() bool { @@ -6036,7 +6517,7 @@ type Bucket_Website struct { func (x *Bucket_Website) Reset() { *x = Bucket_Website{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[64] + mi := &file_google_storage_v2_storage_proto_msgTypes[68] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6049,7 +6530,7 @@ func (x *Bucket_Website) String() string { func (*Bucket_Website) ProtoMessage() {} func (x *Bucket_Website) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[64] + mi := &file_google_storage_v2_storage_proto_msgTypes[68] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6062,7 +6543,7 @@ func (x *Bucket_Website) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Website.ProtoReflect.Descriptor instead. func (*Bucket_Website) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 8} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 9} } func (x *Bucket_Website) GetMainPageSuffix() string { @@ -6094,7 +6575,7 @@ type Bucket_CustomPlacementConfig struct { func (x *Bucket_CustomPlacementConfig) Reset() { *x = Bucket_CustomPlacementConfig{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[65] + mi := &file_google_storage_v2_storage_proto_msgTypes[69] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6107,7 +6588,7 @@ func (x *Bucket_CustomPlacementConfig) String() string { func (*Bucket_CustomPlacementConfig) ProtoMessage() {} func (x *Bucket_CustomPlacementConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[65] + mi := &file_google_storage_v2_storage_proto_msgTypes[69] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6120,7 +6601,7 @@ func (x *Bucket_CustomPlacementConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_CustomPlacementConfig.ProtoReflect.Descriptor instead. 
func (*Bucket_CustomPlacementConfig) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 9} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 10} } func (x *Bucket_CustomPlacementConfig) GetDataLocations() []string { @@ -6143,12 +6624,19 @@ type Bucket_Autoclass struct { // Autoclass is enabled when the bucket is created, the toggle_time is set // to the bucket creation time. ToggleTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=toggle_time,json=toggleTime,proto3" json:"toggle_time,omitempty"` + // An object in an Autoclass bucket will eventually cool down to the + // terminal storage class if there is no access to the object. + // The only valid values are NEARLINE and ARCHIVE. + TerminalStorageClass *string `protobuf:"bytes,3,opt,name=terminal_storage_class,json=terminalStorageClass,proto3,oneof" json:"terminal_storage_class,omitempty"` + // Output only. Latest instant at which the autoclass terminal storage class + // was updated. + TerminalStorageClassUpdateTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=terminal_storage_class_update_time,json=terminalStorageClassUpdateTime,proto3,oneof" json:"terminal_storage_class_update_time,omitempty"` } func (x *Bucket_Autoclass) Reset() { *x = Bucket_Autoclass{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[66] + mi := &file_google_storage_v2_storage_proto_msgTypes[70] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6161,7 +6649,7 @@ func (x *Bucket_Autoclass) String() string { func (*Bucket_Autoclass) ProtoMessage() {} func (x *Bucket_Autoclass) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[66] + mi := &file_google_storage_v2_storage_proto_msgTypes[70] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6174,7 +6662,7 @@ func (x *Bucket_Autoclass) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Autoclass.ProtoReflect.Descriptor instead. func (*Bucket_Autoclass) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 10} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 11} } func (x *Bucket_Autoclass) GetEnabled() bool { @@ -6191,6 +6679,20 @@ func (x *Bucket_Autoclass) GetToggleTime() *timestamppb.Timestamp { return nil } +func (x *Bucket_Autoclass) GetTerminalStorageClass() string { + if x != nil && x.TerminalStorageClass != nil { + return *x.TerminalStorageClass + } + return "" +} + +func (x *Bucket_Autoclass) GetTerminalStorageClassUpdateTime() *timestamppb.Timestamp { + if x != nil { + return x.TerminalStorageClassUpdateTime + } + return nil +} + // Settings for Uniform Bucket level access. // See https://cloud.google.com/storage/docs/uniform-bucket-level-access. 
type Bucket_IamConfig_UniformBucketLevelAccess struct { @@ -6210,7 +6712,7 @@ type Bucket_IamConfig_UniformBucketLevelAccess struct { func (x *Bucket_IamConfig_UniformBucketLevelAccess) Reset() { *x = Bucket_IamConfig_UniformBucketLevelAccess{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[68] + mi := &file_google_storage_v2_storage_proto_msgTypes[72] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6223,7 +6725,7 @@ func (x *Bucket_IamConfig_UniformBucketLevelAccess) String() string { func (*Bucket_IamConfig_UniformBucketLevelAccess) ProtoMessage() {} func (x *Bucket_IamConfig_UniformBucketLevelAccess) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[68] + mi := &file_google_storage_v2_storage_proto_msgTypes[72] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6236,7 +6738,7 @@ func (x *Bucket_IamConfig_UniformBucketLevelAccess) ProtoReflect() protoreflect. // Deprecated: Use Bucket_IamConfig_UniformBucketLevelAccess.ProtoReflect.Descriptor instead. func (*Bucket_IamConfig_UniformBucketLevelAccess) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 3, 0} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 3, 0} } func (x *Bucket_IamConfig_UniformBucketLevelAccess) GetEnabled() bool { @@ -6269,7 +6771,7 @@ type Bucket_Lifecycle_Rule struct { func (x *Bucket_Lifecycle_Rule) Reset() { *x = Bucket_Lifecycle_Rule{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[69] + mi := &file_google_storage_v2_storage_proto_msgTypes[73] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6282,7 +6784,7 @@ func (x *Bucket_Lifecycle_Rule) String() string { func (*Bucket_Lifecycle_Rule) ProtoMessage() {} func (x *Bucket_Lifecycle_Rule) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[69] + mi := &file_google_storage_v2_storage_proto_msgTypes[73] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6295,7 +6797,7 @@ func (x *Bucket_Lifecycle_Rule) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Lifecycle_Rule.ProtoReflect.Descriptor instead. 
func (*Bucket_Lifecycle_Rule) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 4, 0} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 4, 0} } func (x *Bucket_Lifecycle_Rule) GetAction() *Bucket_Lifecycle_Rule_Action { @@ -6329,7 +6831,7 @@ type Bucket_Lifecycle_Rule_Action struct { func (x *Bucket_Lifecycle_Rule_Action) Reset() { *x = Bucket_Lifecycle_Rule_Action{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[70] + mi := &file_google_storage_v2_storage_proto_msgTypes[74] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6342,7 +6844,7 @@ func (x *Bucket_Lifecycle_Rule_Action) String() string { func (*Bucket_Lifecycle_Rule_Action) ProtoMessage() {} func (x *Bucket_Lifecycle_Rule_Action) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[70] + mi := &file_google_storage_v2_storage_proto_msgTypes[74] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6355,7 +6857,7 @@ func (x *Bucket_Lifecycle_Rule_Action) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Lifecycle_Rule_Action.ProtoReflect.Descriptor instead. func (*Bucket_Lifecycle_Rule_Action) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 4, 0, 0} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 4, 0, 0} } func (x *Bucket_Lifecycle_Rule_Action) GetType() string { @@ -6427,7 +6929,7 @@ type Bucket_Lifecycle_Rule_Condition struct { func (x *Bucket_Lifecycle_Rule_Condition) Reset() { *x = Bucket_Lifecycle_Rule_Condition{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[71] + mi := &file_google_storage_v2_storage_proto_msgTypes[75] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6440,7 +6942,7 @@ func (x *Bucket_Lifecycle_Rule_Condition) String() string { func (*Bucket_Lifecycle_Rule_Condition) ProtoMessage() {} func (x *Bucket_Lifecycle_Rule_Condition) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[71] + mi := &file_google_storage_v2_storage_proto_msgTypes[75] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6453,7 +6955,7 @@ func (x *Bucket_Lifecycle_Rule_Condition) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Lifecycle_Rule_Condition.ProtoReflect.Descriptor instead. 
func (*Bucket_Lifecycle_Rule_Condition) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 4, 0, 1} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 4, 0, 1} } func (x *Bucket_Lifecycle_Rule_Condition) GetAgeDays() int32 { @@ -6598,685 +7100,781 @@ var file_google_storage_v2_storage_proto_rawDesc = []byte{ 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, - 0x22, 0xa1, 0x02, 0x0a, 0x13, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4b, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, - 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, - 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, - 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x06, 0x70, - 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x31, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x62, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, - 0x52, 0x08, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, - 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, - 0x6c, 0x12, 0x41, 0x0a, 0x1d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, - 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, - 0x63, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1a, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, - 0x69, 0x6e, 0x65, 0x64, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x41, 0x63, 0x6c, 0x22, 0x81, 0x02, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4b, 0x0a, 0x06, 0x70, - 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, - 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, - 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, - 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, - 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, - 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 
0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x3c, 0x0a, 0x09, - 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x00, 0x52, 0x08, 0x72, - 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, - 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0x72, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, - 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x33, 0x0a, 0x07, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x07, 0x62, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, - 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, - 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x9e, 0x01, 0x0a, - 0x20, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x74, 0x65, 0x6e, - 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xb6, 0x03, - 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x3b, 0x0a, - 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, - 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, - 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, - 0x01, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x25, - 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 
0x64, 0x5f, 0x61, 0x63, 0x6c, - 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, - 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x41, 0x0a, 0x1d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, - 0x6e, 0x65, 0x64, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x6f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1a, 0x70, 0x72, - 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x6c, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, - 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, + 0x22, 0x93, 0x02, 0x0a, 0x13, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, + 0x12, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, + 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x31, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, + 0x41, 0x02, 0x52, 0x08, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x0e, + 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, + 0x41, 0x63, 0x6c, 0x12, 0x41, 0x0a, 0x1d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, + 0x64, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1a, 0x70, 0x72, 0x65, 0x64, + 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x41, 0x63, 0x6c, 0x22, 0xf3, 0x01, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x42, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, + 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, + 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x12, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, + 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, + 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 
0x70, + 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, + 0x69, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, + 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, + 0x00, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x0c, + 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0x72, 0x0a, 0x13, + 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, + 0x07, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, + 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, + 0x22, 0x9e, 0x01, 0x0a, 0x20, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, + 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x22, 0xb6, 0x03, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x06, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x03, 0x48, 0x00, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, + 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 
0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, + 0x01, 0x01, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, + 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, + 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x41, 0x0a, 0x1d, 0x70, 0x72, 0x65, + 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x1a, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x44, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x6c, 0x12, 0x40, 0x0a, 0x0b, + 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, + 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x1a, + 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, - 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, - 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0x68, 0x0a, 0x1f, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x45, 0x0a, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x31, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2b, 0x0a, - 0x29, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, - 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x22, 0x59, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x39, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, - 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xbd, 0x01, 0x0a, 0x1f, + 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0x68, 0x0a, 0x1f, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x45, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x31, 0xe0, 0x41, 0x02, + 0xfa, 0x41, 0x2b, 0x0a, 0x29, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 
0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x65, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x45, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x31, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2b, 0x0a, 0x29, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xc9, 0x01, 0x0a, 0x1f, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x3d, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x12, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x49, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x31, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2b, 0x12, 0x29, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x5b, - 0x0a, 0x13, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x12, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x9b, 0x01, 0x0a, 0x1e, - 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, - 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, - 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x12, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, - 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, - 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, - 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xa3, 0x01, 0x0a, 0x1f, 0x4c, 0x69, - 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x58, 0x0a, - 0x14, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 
0x2e, 0x76, 0x32, 0x2e, + 0x69, 0x67, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x5b, 0x0a, 0x13, 0x6e, 0x6f, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03, + 0xe0, 0x41, 0x02, 0x52, 0x12, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xa7, 0x01, 0x0a, 0x1e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x52, 0x13, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, - 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, - 0xc3, 0x07, 0x0a, 0x14, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x40, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, - 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x64, - 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5b, 0x0a, 0x0e, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x53, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x3c, 0x0a, 0x1a, 0x64, 0x65, 0x73, 0x74, 0x69, - 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, - 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x18, 0x64, 0x65, 0x73, - 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, - 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, - 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x15, 0x69, - 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, - 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3f, 0x0a, 0x07, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, - 0x65, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, - 
0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, - 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, - 0x52, 0x06, 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, - 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, - 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x1a, 0xa8, 0x02, 0x0a, 0x0c, 0x53, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x7b, 0x0a, 0x14, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x63, 0x6f, - 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x48, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, 0x63, 0x6f, - 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x13, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x62, 0x0a, - 0x13, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, + 0x69, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x06, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x31, 0xe0, 0x41, 0x02, 0xfa, + 0x41, 0x2b, 0x12, 0x29, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 
0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, + 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, + 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x22, 0xa3, 0x01, 0x0a, 0x1f, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x58, 0x0a, 0x14, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x13, 0x6e, 0x6f, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, + 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, + 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, + 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xc3, 0x07, 0x0a, 0x14, 0x43, 0x6f, 0x6d, 0x70, + 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x40, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x5b, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, + 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, + 0x3c, 0x0a, 0x1a, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, + 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x18, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x50, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x33, 0x0a, + 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, + 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, + 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x15, 0x69, 
0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, + 0x3f, 0x0a, 0x07, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, + 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, + 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, + 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, + 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, + 0x75, 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x1a, 0xa8, + 0x02, 0x0a, 0x0c, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, + 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, + 0x41, 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7b, 0x0a, 0x14, 0x6f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, + 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, + 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x52, 0x13, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x62, 0x0a, 0x13, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, + 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x33, 0x0a, 0x13, + 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, + 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x16, 0x0a, 
0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, - 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xc0, 0x04, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, - 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, - 0x41, 0x02, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, + 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xe2, 0x04, + 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, + 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, + 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, + 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, + 
0x61, 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, + 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x22, 0xa9, 0x05, 0x0a, 0x14, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, + 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, - 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x23, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, + 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x0a, 0x13, + 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, + 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, + 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, + 0x52, 0x15, 0x69, 0x66, 
0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, + 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, + 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x2b, + 0x0a, 0x0f, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x61, 0x63, + 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x48, 0x04, 0x52, 0x0d, 0x63, 0x6f, 0x70, 0x79, 0x53, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x41, 0x63, 0x6c, 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, + 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, + 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, + 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, + 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x12, 0x0a, 0x10, 0x5f, 0x63, + 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x6c, 0x22, 0x3f, + 0x0a, 0x1b, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, + 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, + 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x22, + 0x1e, 0x0a, 0x1c, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, + 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0xec, 0x05, 0x0a, 0x11, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 
0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x66, 0x66, 0x73, + 0x65, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x72, 0x65, 0x61, 0x64, 0x4c, 0x69, 0x6d, 0x69, + 0x74, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, + 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, + 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, + 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x09, + 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, + 0x61, 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, + 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, + 0x6b, 0x48, 0x04, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, + 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 
0x6d, 0x65, 0x74, 0x61, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0xe4, + 0x05, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, + 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x26, 0x0a, 0x0c, 0x73, 0x6f, 0x66, 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, + 0x0b, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x64, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, - 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, + 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, + 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, + 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x73, 0x74, 0x5f, 
0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, - 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, - 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, - 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, - 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0x3f, 0x0a, 0x1b, 0x43, 0x61, 0x6e, 0x63, - 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, - 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, - 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x22, 0x1e, 0x0a, 0x1c, 0x43, 0x61, 0x6e, - 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, - 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xca, 0x05, 0x0a, 0x11, 0x52, 0x65, - 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x1b, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, - 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, - 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x61, - 0x64, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, - 0x72, 0x65, 0x61, 0x64, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, - 0x61, 0x64, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, - 0x72, 0x65, 0x61, 0x64, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, - 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, - 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, - 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, - 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 
0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, - 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, - 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, - 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, + 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, + 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x05, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, + 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x0f, 0x0a, 0x0d, 0x5f, 0x73, 0x6f, 0x66, 0x74, 0x5f, + 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, + 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, + 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, + 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, + 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, + 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0xaf, 0x02, 0x0a, 0x12, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, 0x10, + 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, + 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x52, 0x0f, 0x63, 0x68, 0x65, 0x63, + 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x4d, 0x0a, 0x10, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x44, 0x0a, 0x0d, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e, + 0x67, 0x65, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, + 0x12, 0x35, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 
0x74, 0x52, 0x08, 0x6d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x8c, 0x04, 0x0a, 0x0f, 0x57, 0x72, 0x69, 0x74, + 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x3a, 0x0a, 0x08, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, 0x65, + 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x33, + 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, + 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, + 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, + 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, + 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, + 0x12, 0x24, 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, 0x52, 0x0a, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, + 0x69, 0x7a, 0x65, 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, + 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, + 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, - 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, - 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 
0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x22, 0xf8, 0x03, 0x0a, 0x12, 0x57, 0x72, 0x69, 0x74, 0x65, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, + 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x48, 0x00, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x12, 0x50, 0x0a, 0x11, + 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, + 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, + 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x48, 0x00, 0x52, 0x0f, 0x77, + 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x26, + 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x77, 0x72, 0x69, 0x74, 0x65, + 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x4f, 0x0a, 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, + 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, + 0x44, 0x61, 0x74, 0x61, 0x48, 0x01, 0x52, 0x0f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, + 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, + 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x66, 0x69, + 0x6e, 0x69, 0x73, 0x68, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x0f, 0x0a, 0x0d, 0x66, 0x69, 0x72, 0x73, + 0x74, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x64, 0x61, 0x74, + 0x61, 0x22, 0x87, 0x01, 0x0a, 0x13, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x0e, 0x70, 0x65, 0x72, + 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x03, 0x48, 0x00, 0x52, 0x0d, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x53, 0x69, + 0x7a, 0x65, 0x12, 0x37, 
0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48, + 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x77, + 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xb5, 0x04, 0x0a, 0x16, + 0x42, 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, + 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x75, 0x70, 0x6c, + 0x6f, 0x61, 0x64, 0x49, 0x64, 0x12, 0x50, 0x0a, 0x11, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x53, 0x70, 0x65, 0x63, 0x48, 0x00, 0x52, 0x0f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x26, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, + 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, + 0x41, 0x02, 0x52, 0x0b, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, + 0x4f, 0x0a, 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x5f, 0x64, + 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, + 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x48, 0x01, 0x52, + 0x0f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, + 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, + 0x73, 0x75, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, + 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, + 0x75, 0x70, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x05, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x69, 0x6e, 0x69, + 0x73, 0x68, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, + 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 
0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, - 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x04, 0x52, 0x08, 0x72, 0x65, 0x61, - 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, - 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, - 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, - 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, - 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, - 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0x89, 0x05, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x4f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x62, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, - 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, - 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, - 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, - 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, - 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, - 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, - 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, - 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, - 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 
0x6d, 0x6f, - 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, - 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, - 0x61, 0x73, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, - 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x04, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, - 0x6b, 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, - 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, - 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, - 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, - 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, - 0x73, 0x6b, 0x22, 0xaf, 0x02, 0x0a, 0x12, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, 0x10, 0x63, 0x68, 0x65, - 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, - 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x52, 0x0f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, - 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, - 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x44, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x65, - 0x6e, 0x74, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, - 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x35, 0x0a, - 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 
0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x22, 0x8c, 0x04, 0x0a, 0x0f, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x3a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, + 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x0f, 0x0a, 0x0d, 0x66, 0x69, + 0x72, 0x73, 0x74, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x64, + 0x61, 0x74, 0x61, 0x22, 0x8b, 0x01, 0x0a, 0x17, 0x42, 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, + 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x27, 0x0a, 0x0e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0d, 0x70, 0x65, 0x72, 0x73, 0x69, + 0x73, 0x74, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, - 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, - 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x33, 0x0a, 0x13, 0x69, - 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, - 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, - 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, - 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, - 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, - 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, - 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x24, 0x0a, - 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x03, 0x48, 0x04, 0x52, 0x0a, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x69, 0x7a, 0x65, - 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, - 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 
0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, - 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, - 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, - 0x69, 0x7a, 0x65, 0x22, 0xf8, 0x03, 0x0a, 0x12, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x09, 0x75, 0x70, - 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, - 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x12, 0x50, 0x0a, 0x11, 0x77, 0x72, 0x69, - 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x48, 0x00, 0x52, 0x0f, 0x77, 0x72, 0x69, 0x74, - 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x26, 0x0a, 0x0c, 0x77, - 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x66, 0x66, - 0x73, 0x65, 0x74, 0x12, 0x4f, 0x0a, 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, - 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, - 0x61, 0x48, 0x01, 0x52, 0x0f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, - 0x44, 0x61, 0x74, 0x61, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, - 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, - 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, - 0x75, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x5f, 0x77, 0x72, - 0x69, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x66, 0x69, 0x6e, 0x69, 0x73, - 0x68, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, - 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, - 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 
0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x22, 0x9f, 0x04, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, + 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, + 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, + 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, + 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, + 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, + 0x6b, 0x65, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, + 0x72, 0x12, 0x3c, 0x0a, 0x1a, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x74, 0x72, 0x61, + 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54, 0x72, + 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, + 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x1a, 0x0a, 0x08, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, + 0x73, 0x6b, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, + 0x01, 0x12, 0x34, 0x0a, 0x13, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, + 0x69, 0x63, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, + 0xe0, 0x41, 0x01, 0x52, 0x12, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, + 0x69, 0x63, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x30, 0x0a, 0x11, 0x6c, 0x65, 0x78, 0x69, 0x63, + 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x5f, 0x65, 0x6e, 0x64, 0x18, 0x0b, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, + 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x45, 0x6e, 0x64, 0x12, 0x26, 0x0a, 0x0c, 0x73, 0x6f, 0x66, + 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x42, + 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0b, 0x73, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x64, 0x12, 0x22, 0x0a, 0x0a, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x67, 0x6c, 0x6f, 0x62, 0x18, + 0x0e, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x47, 0x6c, 0x6f, 0x62, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, + 0x61, 0x73, 0x6b, 0x22, 0xaa, 0x01, 0x0a, 0x17, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, + 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 
0x74, 0x12, + 0x20, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, + 0x64, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, + 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, - 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x0f, 0x0a, 0x0d, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x6d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x87, - 0x01, 0x0a, 0x13, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x0e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, - 0x74, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, - 0x52, 0x0d, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, - 0x37, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x08, - 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, - 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xd3, 0x03, 0x0a, 0x12, 0x4c, 0x69, 0x73, - 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x3d, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x12, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, - 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, - 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x65, - 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, - 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, 0x3c, 0x0a, 0x1a, 0x69, 0x6e, 0x63, 0x6c, - 0x75, 0x64, 0x65, 0x5f, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x6c, - 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x69, 0x6e, - 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, - 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x1a, - 0x0a, 0x08, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x08, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, - 0x61, 0x64, 
0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x61, - 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x12, 0x34, 0x0a, 0x13, 0x6c, 0x65, 0x78, 0x69, - 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, - 0x0a, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x12, 0x6c, 0x65, 0x78, 0x69, - 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x30, - 0x0a, 0x11, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x5f, - 0x65, 0x6e, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, - 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x45, 0x6e, 0x64, - 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0xaa, - 0x01, 0x0a, 0x17, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x75, 0x70, - 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, - 0x41, 0x02, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x12, 0x6d, 0x0a, 0x1c, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, + 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, - 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x22, 0x8c, 0x01, 0x0a, 0x18, - 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x0e, 0x70, 0x65, 0x72, 0x73, - 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, - 0x48, 0x00, 0x52, 0x0d, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x53, 0x69, 0x7a, - 0x65, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48, 0x00, - 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x77, 0x72, - 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x93, 0x0e, 0x0a, 0x14, 0x52, - 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x31, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x18, 0x20, 0x01, 0x28, 0x09, 0x42, 0x06, 0xe0, - 0x41, 0x02, 0xe0, 0x41, 0x05, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 
0x12, 0x57, 0x0a, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x19, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x28, 0xe0, 0x41, 0x02, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, - 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x11, 0x64, 0x65, - 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, - 0x56, 0x0a, 0x13, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, - 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, - 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, - 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x11, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x4b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x3b, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, - 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x62, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, - 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x28, - 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x2b, 0x0a, 0x11, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, - 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, - 0x77, 0x72, 0x69, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x3c, 0x0a, 0x1a, 0x64, 0x65, - 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, - 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x18, - 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x64, 0x65, - 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, + 0x22, 0x8c, 0x01, 0x0a, 0x18, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, + 0x0e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0d, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, + 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 
0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, + 0x0e, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, + 0xb5, 0x0e, 0x0a, 0x14, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x31, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x74, + 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x18, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x06, 0xe0, 0x41, 0x02, 0xe0, 0x41, 0x05, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x74, + 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x57, 0x0a, 0x12, 0x64, + 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x18, 0x19, 0x20, 0x01, 0x28, 0x09, 0x42, 0x28, 0xe0, 0x41, 0x02, 0xe0, 0x41, 0x05, 0xfa, + 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x52, 0x11, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x12, 0x56, 0x0a, 0x13, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x1b, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x11, 0x64, 0x65, 0x73, 0x74, 0x69, + 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x3b, 0x0a, 0x0b, + 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x0b, 0x64, 0x65, + 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4a, 0x0a, 0x0d, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x28, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, + 0x02, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, + 0x2b, 0x0a, 0x11, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, + 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x12, 0x3c, 0x0a, 0x1a, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 
0x6c, 0x18, + 0x1c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x18, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, + 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, + 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, + 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, + 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, + 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0a, 0x20, 0x01, + 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, + 0x01, 0x12, 0x40, 0x0a, 0x1a, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, - 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, - 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, - 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, - 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, - 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, + 0x0b, 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, 0x52, 0x17, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x88, 0x01, 0x01, 0x12, 0x47, 0x0a, 0x1e, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x03, 0x48, 0x05, 0x52, 0x1a, 0x69, + 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x48, 0x0a, 0x1e, + 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0d, + 0x20, 0x01, 
0x28, 0x03, 0x48, 0x06, 0x52, 0x1b, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, - 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, - 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, - 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, - 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x40, 0x0a, 0x1a, 0x69, 0x66, - 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, - 0x52, 0x17, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x47, 0x0a, 0x1e, - 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0c, - 0x20, 0x01, 0x28, 0x03, 0x48, 0x05, 0x52, 0x1a, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, - 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x48, 0x0a, 0x1e, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x03, 0x48, 0x06, 0x52, - 0x1b, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, - 0x4f, 0x0a, 0x22, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, - 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x48, 0x07, 0x52, 0x1e, 0x69, - 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, - 0x12, 0x3e, 0x0a, 0x1c, 0x6d, 0x61, 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x72, 0x65, - 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x61, 0x6c, 0x6c, - 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, 0x52, 0x18, 0x6d, 0x61, 0x78, 0x42, 0x79, 0x74, 0x65, 0x73, - 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x50, 0x65, 0x72, 0x43, 0x61, 0x6c, 0x6c, - 0x12, 0x47, 0x0a, 0x20, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, - 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, - 0x69, 0x74, 0x68, 0x6d, 0x18, 0x10, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1d, 0x63, 0x6f, 0x70, 0x79, + 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x4f, 0x0a, 0x22, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0e, 0x20, 0x01, + 0x28, 0x03, 0x48, 0x07, 0x52, 0x1e, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 
0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3e, 0x0a, 0x1c, 0x6d, 0x61, 0x78, 0x5f, 0x62, + 0x79, 0x74, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x70, + 0x65, 0x72, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, 0x52, 0x18, 0x6d, + 0x61, 0x78, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, + 0x50, 0x65, 0x72, 0x43, 0x61, 0x6c, 0x6c, 0x12, 0x47, 0x0a, 0x20, 0x63, 0x6f, 0x70, 0x79, 0x5f, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x10, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x1d, 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, + 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, + 0x12, 0x46, 0x0a, 0x20, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, + 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, + 0x79, 0x74, 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x1c, 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x46, 0x0a, 0x20, 0x63, 0x6f, 0x70, - 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x15, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x1c, 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, - 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, - 0x73, 0x12, 0x53, 0x0a, 0x27, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, - 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x16, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x22, 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, - 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, - 0x36, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, - 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, - 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, - 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, - 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, - 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, - 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, - 0x73, 0x75, 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 
0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, - 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, - 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, - 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, - 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x42, 0x1d, 0x0a, 0x1b, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x42, 0x21, 0x0a, 0x1f, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, - 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x21, 0x0a, 0x1f, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x25, 0x0a, 0x23, 0x5f, 0x69, 0x66, - 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x22, 0xd6, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x62, 0x79, - 0x74, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x13, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, - 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x6f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x6f, 0x6e, - 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x12, 0x23, 0x0a, - 0x0d, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x54, 0x6f, 0x6b, - 0x65, 0x6e, 0x12, 0x35, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, - 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0xaf, 0x02, 0x0a, 0x1a, 0x53, 0x74, - 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x53, 0x0a, 0x11, 0x77, 0x72, 0x69, 0x74, - 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0f, 0x77, 0x72, - 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 
0x6d, 0x0a, + 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x53, 0x0a, 0x27, 0x63, 0x6f, 0x70, 0x79, + 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, + 0x74, 0x65, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x22, 0x63, 0x6f, 0x70, 0x79, 0x53, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, + 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x03, 0x20, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x22, 0x3a, 0x0a, 0x1b, 0x53, - 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, - 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x70, - 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, - 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x22, 0x87, 0x05, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x36, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, - 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, - 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, - 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, - 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, - 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, - 0x65, 0x74, 0x61, 0x67, 0x65, 
0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, - 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, - 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, - 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, - 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, - 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, - 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x0a, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, - 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, - 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, - 0x73, 0x6b, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, - 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, - 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, - 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, - 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, - 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x22, 0x69, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, - 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, - 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, - 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x9e, 0x01, 0x0a, - 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, 
0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, - 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x12, 0x37, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, - 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x22, 0x81, 0x01, - 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, - 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, - 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, - 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x65, 0x63, 0x72, 0x65, - 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x0e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, - 0x73, 0x22, 0x87, 0x01, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, - 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x61, 0x63, - 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, - 0x41, 0x02, 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x49, 0x64, 0x12, 0x4d, 0x0a, 0x07, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, + 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, + 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, + 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, + 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1d, 0x0a, 0x1b, 0x5f, + 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x21, 0x0a, 0x1f, 0x5f, 0x69, + 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x21, 0x0a, + 0x1f, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x42, 0x25, 0x0a, 0x23, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 
0x65, 0x5f, 0x6d, + 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, + 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xd6, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x77, 0x72, + 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x74, + 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, + 0x74, 0x74, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x13, 0x74, 0x6f, 0x74, 0x61, + 0x6c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x12, + 0x1f, 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x69, 0x7a, 0x65, + 0x12, 0x12, 0x0a, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, + 0x64, 0x6f, 0x6e, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, + 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x77, + 0x72, 0x69, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x35, 0x0a, 0x08, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x22, 0xaf, 0x02, 0x0a, 0x1a, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, + 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x53, 0x0a, 0x11, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, + 0x73, 0x70, 0x65, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, + 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x42, 0x03, + 0xe0, 0x41, 0x02, 0x52, 0x0f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x53, 0x70, 0x65, 0x63, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, + 0x72, 0x61, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, + 0x61, 0x6d, 0x73, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, + 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, + 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, + 0x6d, 0x73, 0x22, 0x3a, 0x0a, 0x1b, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, + 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 
0x12, 0x1b, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x22, 0x87, + 0x05, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x33, + 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, + 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, + 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, + 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, + 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, + 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, + 0x63, 0x6c, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, + 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, + 0x67, 0x65, 0x6e, 0x65, 0x72, 
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, + 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, + 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, + 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0x69, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x22, 0x9e, 0x01, 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, + 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x84, 0x01, 0x0a, 0x11, - 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x37, 0x0a, 0x15, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, + 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, + 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x45, + 0x6d, 0x61, 0x69, 0x6c, 0x22, 0x81, 0x01, 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, + 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, + 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x28, + 0x0a, 0x10, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, 0x79, 0x74, + 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, + 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x87, 0x01, 0x0a, 0x14, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 
0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x49, 0x64, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, @@ -7284,896 +7882,952 @@ var file_google_storage_v2_storage_proto_rawDesc = []byte{ 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x22, 0x80, 0x02, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, - 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, + 0x63, 0x74, 0x22, 0x84, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, + 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x65, + 0x73, 0x73, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, + 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x49, 0x64, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, - 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, - 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, - 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, - 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x32, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, - 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x2a, 0x0a, 0x11, 0x73, 0x68, 0x6f, - 0x77, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x73, 0x68, 0x6f, 0x77, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x64, 0x4b, 0x65, 0x79, 0x73, 0x22, 0x7f, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, - 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, - 0x09, 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x68, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x26, - 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, - 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, - 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x97, 0x01, 0x0a, 0x14, 0x55, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x42, 0x0a, 0x08, 
0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, - 0x61, 0x64, 0x61, 0x74, 0x61, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x07, 0x68, 0x6d, 0x61, 0x63, - 0x4b, 0x65, 0x79, 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, - 0x73, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, - 0x22, 0xbf, 0x01, 0x0a, 0x19, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x31, - 0x0a, 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, - 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x65, 0x6e, - 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, - 0x6d, 0x12, 0x30, 0x0a, 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x6b, 0x65, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x12, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x42, 0x79, - 0x74, 0x65, 0x73, 0x12, 0x3d, 0x0a, 0x1b, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, 0x74, - 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x18, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, - 0x65, 0x73, 0x22, 0xca, 0x05, 0x0a, 0x10, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, - 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x73, 0x22, 0xb5, 0x05, 0x0a, 0x06, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x73, 0x12, 0x16, 0x0a, 0x12, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x53, 0x5f, 0x55, 0x4e, 0x53, - 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1b, 0x0a, 0x14, 0x4d, 0x41, - 0x58, 0x5f, 0x52, 0x45, 0x41, 0x44, 0x5f, 0x43, 0x48, 0x55, 0x4e, 0x4b, 0x5f, 0x42, 0x59, 0x54, - 0x45, 0x53, 0x10, 0x80, 0x80, 0x80, 0x01, 0x12, 0x1c, 0x0a, 0x15, 0x4d, 0x41, 0x58, 0x5f, 0x57, - 0x52, 0x49, 0x54, 0x45, 0x5f, 0x43, 0x48, 0x55, 0x4e, 0x4b, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, - 0x10, 0x80, 0x80, 0x80, 0x01, 0x12, 0x19, 0x0a, 0x12, 0x4d, 0x41, 0x58, 0x5f, 0x4f, 0x42, 0x4a, - 0x45, 0x43, 0x54, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x4d, 0x42, 0x10, 0x80, 0x80, 0xc0, 0x02, - 0x12, 0x29, 0x0a, 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, - 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x4e, 0x41, - 0x4d, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x08, 0x12, 0x2a, 0x0a, 0x25, 0x4d, - 0x41, 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, - 0x54, 0x41, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x42, - 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x20, 0x12, 0x29, 0x0a, 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x43, - 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x54, - 0x4f, 0x54, 0x41, 0x4c, 0x5f, 0x53, 0x49, 
0x5a, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, - 0x80, 0x40, 0x12, 0x2a, 0x0a, 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, - 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x54, 0x4f, 0x54, 0x41, 0x4c, 0x5f, - 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0xa0, 0x01, 0x12, 0x27, - 0x0a, 0x23, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x42, - 0x55, 0x43, 0x4b, 0x45, 0x54, 0x10, 0x64, 0x12, 0x22, 0x0a, 0x1e, 0x4d, 0x41, 0x58, 0x5f, 0x4c, - 0x49, 0x46, 0x45, 0x43, 0x59, 0x43, 0x4c, 0x45, 0x5f, 0x52, 0x55, 0x4c, 0x45, 0x53, 0x5f, 0x50, - 0x45, 0x52, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, 0x10, 0x64, 0x12, 0x26, 0x0a, 0x22, 0x4d, - 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, - 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, - 0x53, 0x10, 0x05, 0x12, 0x31, 0x0a, 0x2c, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, - 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, - 0x54, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x4c, 0x45, 0x4e, - 0x47, 0x54, 0x48, 0x10, 0x80, 0x02, 0x12, 0x33, 0x0a, 0x2e, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, - 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, - 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x5f, 0x56, 0x41, 0x4c, 0x55, - 0x45, 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10, 0x80, 0x08, 0x12, 0x1c, 0x0a, 0x18, 0x4d, - 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x49, 0x45, - 0x53, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x10, 0x40, 0x12, 0x1f, 0x0a, 0x1b, 0x4d, 0x41, 0x58, - 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x56, 0x41, 0x4c, 0x55, - 0x45, 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10, 0x3f, 0x12, 0x1f, 0x0a, 0x1a, 0x4d, 0x41, - 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x56, 0x41, 0x4c, - 0x55, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x01, 0x12, 0x2e, 0x0a, 0x29, 0x4d, - 0x41, 0x58, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x49, 0x44, 0x53, 0x5f, 0x50, 0x45, - 0x52, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x53, - 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x10, 0xe8, 0x07, 0x12, 0x1e, 0x0a, 0x1a, 0x53, - 0x50, 0x4c, 0x49, 0x54, 0x5f, 0x54, 0x4f, 0x4b, 0x45, 0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x5f, 0x56, - 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x44, 0x41, 0x59, 0x53, 0x10, 0x0e, 0x1a, 0x02, 0x10, 0x01, 0x22, - 0xf0, 0x1e, 0x0a, 0x06, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x09, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x08, 0x62, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x1d, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x05, 0xfa, - 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 
[elided: regenerated raw protobuf descriptor bytes (`rawDesc`) for the vendored google.storage.v2 generated Go code; this hunk is machine-generated output from protoc-gen-go picked up by the dependency update (it adds fields such as Bucket.soft_delete_policy and Autoclass.terminal_storage_class) and is not hand-edited]
0x69, - 0x67, 0x12, 0x4d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x7b, 0x62, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x7d, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x2f, 0x7b, 0x6e, 0x6f, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x7d, - 0x22, 0x71, 0x0a, 0x12, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, - 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x31, 0x0a, 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x28, 0x0a, 0x10, 0x6b, 0x65, 0x79, - 0x5f, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x6b, 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, - 0x74, 0x65, 0x73, 0x22, 0xec, 0x0b, 0x0a, 0x06, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x17, - 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, - 0x05, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x1f, 0x0a, - 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, - 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, - 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x1b, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x23, 0x0a, 0x0a, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, - 0xe0, 0x41, 0x05, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x2b, 0x0a, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x6d, 0x65, - 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, - 0x73, 0x12, 0x17, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x42, - 0x03, 0xe0, 0x41, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, - 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x07, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x63, - 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x2f, 0x0a, 0x13, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, - 0x5f, 0x64, 0x69, 0x73, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x12, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x44, 0x69, 0x73, 0x70, 0x6f, - 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, - 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, - 0x61, 0x63, 0x68, 
0x65, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x38, 0x0a, 0x03, 0x61, - 0x63, 0x6c, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, - 0x52, 0x03, 0x61, 0x63, 0x6c, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, - 0x5f, 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, - 0x12, 0x40, 0x0a, 0x0b, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, - 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x69, - 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, - 0x70, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, - 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, - 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x2c, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, - 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x05, - 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, - 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x45, 0x0a, 0x09, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, - 0x6d, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x42, 0x03, 0xe0, 0x41, - 0x03, 0x52, 0x09, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x40, 0x0a, 0x0b, - 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, - 0x41, 0x03, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3f, - 0x0a, 0x07, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x12, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, - 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x12, - 0x5a, 0x0a, 0x19, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x13, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, - 0xe0, 0x41, 0x03, 0x52, 0x16, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x74, - 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x14, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x0d, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x48, 0x6f, - 0x6c, 0x64, 0x12, 0x4e, 0x0a, 0x15, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x13, 0x72, - 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x54, 0x69, - 0x6d, 0x65, 0x12, 0x43, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x16, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, - 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, - 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x2d, 0x0a, 0x10, 0x65, 0x76, 0x65, 0x6e, 0x74, - 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, - 0x08, 0x48, 0x00, 0x52, 0x0e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x48, - 0x6f, 0x6c, 0x64, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, - 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x42, - 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x56, 0x0a, 0x13, 0x63, - 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x75, 0x73, - 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x12, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, - 0x6d, 0x65, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x4d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x7d, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x7b, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x7d, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x2f, 0x7b, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x7d, 0x22, 0x71, + 0x0a, 0x12, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x31, 0x0a, 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x13, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 
0x69, 0x6f, 0x6e, 0x41, 0x6c, + 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x28, 0x0a, 0x10, 0x6b, 0x65, 0x79, 0x5f, 0x73, + 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x0e, 0x6b, 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65, + 0x73, 0x22, 0xec, 0x0b, 0x0a, 0x06, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x17, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x1b, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x23, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, + 0x05, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, + 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x61, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, + 0x17, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, + 0x41, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x63, 0x6f, 0x64, + 0x69, 0x6e, 0x67, 0x12, 0x2f, 0x0a, 0x13, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x64, + 0x69, 0x73, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x12, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x44, 0x69, 0x73, 0x70, 0x6f, 0x73, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x63, 0x6f, + 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x61, 0x63, + 0x68, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x38, 0x0a, 0x03, 0x61, 0x63, 0x6c, + 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x03, + 0x61, 0x63, 0x6c, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x6c, + 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, + 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x12, 0x40, + 0x0a, 0x0b, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0c, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 
0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, + 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, + 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, - 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x13, 0x0a, - 0x11, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, - 0x6c, 0x64, 0x22, 0x97, 0x02, 0x0a, 0x13, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x63, - 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, - 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x0e, - 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, - 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, - 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x22, 0x0a, 0x0a, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, - 0x5f, 0x61, 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, - 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x41, 0x6c, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, - 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, - 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, - 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x14, 0x0a, 0x05, 0x65, - 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, - 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x41, 0x0a, 0x0c, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x52, - 0x0b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x22, 0x8e, 0x01, 0x0a, - 0x13, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x52, 0x07, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x65, - 0x66, 0x69, 0x78, 
0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x65, - 0x66, 0x69, 0x78, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, - 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, - 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x48, 0x0a, - 0x0b, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x12, 0x25, 0x0a, 0x0e, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x75, 0x6d, - 0x62, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x74, 0x65, 0x61, 0x6d, 0x22, 0x35, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x6d, 0x61, - 0x69, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0c, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x3c, - 0x0a, 0x05, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, - 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x22, 0x5f, 0x0a, 0x0c, - 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, - 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, - 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x03, 0x65, 0x6e, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, - 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x63, - 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x32, 0x98, 0x26, - 0x0a, 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x72, 0x0a, 0x0c, 0x44, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, - 0x6c, 0x65, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x22, 0x8a, 0xd3, 0xe4, 0x93, 0x02, - 0x15, 0x12, 0x13, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x6f, 0x0a, - 0x09, 0x47, 0x65, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, - 0x65, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x22, 0x8a, 0xd3, 0xe4, 0x93, - 0x02, 0x15, 0x12, 0x13, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 
0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xab, - 0x01, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, - 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x22, 0x58, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, + 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x2c, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, + 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, + 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x43, 0x6f, + 0x75, 0x6e, 0x74, 0x12, 0x45, 0x0a, 0x09, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, + 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, + 0x09, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, + 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x07, + 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x12, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, + 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, + 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x5a, 0x0a, + 0x19, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, + 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, + 0x03, 0x52, 0x16, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x43, 0x6c, 0x61, 0x73, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x65, 0x6d, + 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x14, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0d, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x48, 0x6f, 0x6c, 0x64, + 0x12, 0x4e, 0x0a, 0x15, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, + 0x70, 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x13, 0x72, 0x65, 0x74, + 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x70, 0x69, 0x72, 
0x65, 0x54, 0x69, 0x6d, 0x65, + 0x12, 0x43, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x16, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x2d, 0x0a, 0x10, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, + 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x48, + 0x00, 0x52, 0x0e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x48, 0x6f, 0x6c, + 0x64, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x18, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x42, 0x03, 0xe0, + 0x41, 0x03, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x56, 0x0a, 0x13, 0x63, 0x75, 0x73, + 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, + 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x63, + 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x3b, 0x0a, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x52, 0x0a, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x1a, 0x3b, + 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x13, 0x0a, 0x11, 0x5f, + 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, 0x64, + 0x22, 0x97, 0x02, 0x0a, 0x13, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6c, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x0e, 0x0a, 0x02, + 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x12, 0x22, 0x0a, 0x0a, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x61, + 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x65, + 0x6e, 0x74, 0x69, 0x74, 0x79, 0x41, 0x6c, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, + 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 
0x61, + 0x69, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, + 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x41, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x52, 0x0b, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x22, 0x8e, 0x01, 0x0a, 0x13, 0x4c, + 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x65, 0x66, 0x69, + 0x78, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x65, 0x66, 0x69, + 0x78, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, + 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, + 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x48, 0x0a, 0x0b, 0x50, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x75, 0x6d, 0x62, 0x65, + 0x72, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x74, 0x65, 0x61, 0x6d, 0x22, 0x35, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x6d, 0x61, 0x69, 0x6c, + 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, + 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x3c, 0x0a, 0x05, + 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1b, 0x0a, + 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x22, 0x5f, 0x0a, 0x0c, 0x43, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, + 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x65, + 0x6e, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x6c, + 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x63, 0x6f, 0x6d, + 0x70, 0x6c, 0x65, 0x74, 0x65, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x32, 0x98, 0x28, 0x0a, 0x07, + 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x72, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 
0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x22, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x15, 0x12, 0x13, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0b, + 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x6f, 0x0a, 0x09, 0x47, + 0x65, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, + 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x22, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x15, 0x12, 0x13, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xab, 0x01, 0x0a, + 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x22, 0x58, 0xda, 0x41, 0x17, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x2c, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x8a, 0xd3, 0xe4, 0x93, + 0x02, 0x38, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0c, 0x7b, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x1e, 0x0a, 0x0e, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x0c, 0x7b, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x85, 0x01, 0x0a, 0x0b, 0x4c, + 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, + 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x27, 0xda, 0x41, 0x06, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x18, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, - 0x2a, 0x7d, 0x12, 0x1e, 0x0a, 0x0e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x12, 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, - 0x2a, 0x7d, 0xda, 0x41, 0x17, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x62, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x2c, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x12, 0x85, 0x01, 0x0a, - 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 
0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x25, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x27, 0x8a, 0xd3, 0xe4, - 0x93, 0x02, 0x18, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0c, 0x7b, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x06, 0x70, 0x61, - 0x72, 0x65, 0x6e, 0x74, 0x12, 0x93, 0x01, 0x0a, 0x19, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x12, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, + 0x2a, 0x7d, 0x12, 0x93, 0x01, 0x0a, 0x19, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x22, 0x26, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, - 0x7d, 0xda, 0x41, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0xab, 0x01, 0x0a, 0x0c, 0x47, - 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, - 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x60, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x12, 0x17, - 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x34, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x12, 0x28, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, - 0x2a, 0x7d, 0x2f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2a, 0xda, 0x41, 0x08, - 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0xb2, 0x01, 0x0a, 0x0c, 0x53, 0x65, 0x74, + 0x12, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, + 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x22, 0x26, 0xda, 0x41, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, + 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 
0x12, 0x0b, 0x7b, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xab, 0x01, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, + 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x22, 0x67, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x12, 0x17, 0x0a, 0x08, + 0x6c, 0x69, 0x63, 0x79, 0x22, 0x60, 0xda, 0x41, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, + 0x12, 0x34, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x28, 0x7b, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, + 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2a, 0x12, 0xb2, 0x01, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x49, 0x61, + 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x22, 0x67, 0xda, 0x41, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x34, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x28, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, - 0x2f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2a, 0xda, 0x41, 0x0f, 0x72, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0xd7, 0x01, - 0x0a, 0x12, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, - 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, - 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, - 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, - 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x6c, 0x8a, 0xd3, 0xe4, 0x93, 0x02, - 0x4f, 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, - 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x34, 0x0a, 0x08, 0x72, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x28, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, - 0x70, 
0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2a, - 0xda, 0x41, 0x14, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x65, 0x72, 0x6d, - 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x8a, 0x01, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x37, 0x8a, 0xd3, 0xe4, - 0x93, 0x02, 0x1c, 0x12, 0x1a, 0x0a, 0x0b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, - 0x41, 0x12, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, - 0x6d, 0x61, 0x73, 0x6b, 0x12, 0x9f, 0x01, 0x0a, 0x18, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, - 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x12, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x37, 0x8a, + 0x2f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2a, 0x12, 0xd7, 0x01, 0x0a, 0x12, + 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, + 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, + 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x6c, 0xda, 0x41, 0x14, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, + 0x34, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x28, 0x7b, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x2f, 0x2a, 0x2a, 0x12, 0x8a, 0x01, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x42, 0x75, 0x63, 0x6b, 
0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x37, 0xda, 0x41, 0x12, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, + 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x1c, 0x12, 0x1a, 0x0a, 0x0b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, + 0x2a, 0x7d, 0x12, 0x9f, 0x01, 0x0a, 0x18, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x37, 0xda, 0x41, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x12, 0x28, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, + 0x7d, 0x2f, 0x2a, 0x2a, 0x12, 0xa8, 0x01, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2f, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x37, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x12, 0x28, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, - 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0xda, - 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xa8, 0x01, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x4e, 0x6f, - 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x12, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x37, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2a, - 0x12, 0x28, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 
0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0xb1, 0x01, 0x0a, 0x18, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x32, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x3a, 0x8a, 0xd3, 0xe4, 0x93, 0x02, - 0x17, 0x12, 0x15, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x1a, 0x70, 0x61, 0x72, 0x65, 0x6e, - 0x74, 0x2c, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0xa8, 0x01, 0x0a, 0x17, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, - 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x73, 0x12, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, - 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, - 0x12, 0x15, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, - 0x12, 0x7e, 0x0a, 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x29, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x12, 0x21, 0x0a, - 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x62, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, - 0x12, 0x98, 0x01, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 
0x70, 0x74, - 0x79, 0x22, 0x48, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, - 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0xba, 0x01, 0x0a, 0x14, - 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, - 0x72, 0x69, 0x74, 0x65, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, - 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, - 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x41, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x12, 0x2d, 0x0a, - 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0xda, 0x41, 0x09, 0x75, - 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x12, 0x95, 0x01, 0x0a, 0x09, 0x47, 0x65, 0x74, - 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x48, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, - 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0xa5, 0x01, 0x0a, 0x0a, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, - 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x48, 0x8a, 0xd3, - 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, - 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x0d, 0x62, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x61, 
0x74, 0x69, 0x6f, 0x6e, 0x30, 0x01, 0x12, 0x8c, 0x01, 0x0a, 0x0c, 0x55, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x39, 0x8a, 0xd3, - 0xe4, 0x93, 0x02, 0x1e, 0x12, 0x1c, 0x0a, 0x0d, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x62, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, - 0x2a, 0x7d, 0xda, 0x41, 0x12, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x75, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x12, 0x60, 0x0a, 0x0b, 0x57, 0x72, 0x69, 0x74, 0x65, - 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, - 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x12, 0x84, 0x01, 0x0a, 0x0b, 0x4c, 0x69, - 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, - 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, - 0x12, 0x15, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, - 0x12, 0x98, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, + 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0x12, + 0xb1, 0x01, 0x0a, 0x18, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x32, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 
0x67, 0x22, 0x3a, 0xda, 0x41, 0x1a, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x2c, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, + 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, + 0x2a, 0x2a, 0x7d, 0x12, 0xa8, 0x01, 0x0a, 0x17, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, + 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x7e, + 0x0a, 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, + 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x22, 0x29, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x12, 0x21, 0x0a, 0x12, 0x64, + 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x98, + 0x01, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, + 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, + 0x48, 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x8a, 0xd3, 0xe4, 0x93, + 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x8d, 0x01, 0x0a, 0x0d, 0x52, 0x65, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x3a, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, 0x0f, 0x0a, 
0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x21, 0x0a, 0x12, 0x64, 0x65, 0x73, 0x74, - 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, - 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xae, 0x01, 0x0a, 0x13, - 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, - 0x69, 0x74, 0x65, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, + 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, + 0x38, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x8a, 0xd3, 0xe4, 0x93, + 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xba, 0x01, 0x0a, 0x14, 0x43, 0x61, + 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, + 0x74, 0x65, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, - 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x38, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x32, 0x12, 0x30, 0x0a, 0x21, 0x77, 0x72, - 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x2e, - 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, - 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xae, 0x01, 0x0a, - 0x10, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, + 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, + 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x41, 0xda, 0x41, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, + 0x64, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, + 0x64, 0x5f, 0x69, 0x64, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, + 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0x12, 0x95, 0x01, 0x0a, 0x09, 0x47, 0x65, 0x74, 
0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x22, 0x48, 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xa5, + 0x01, 0x0a, 0x0a, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x41, 0x8a, 0xd3, 0xe4, 0x93, - 0x02, 0x2f, 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x12, - 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, - 0x2a, 0xda, 0x41, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x12, 0x80, 0x01, - 0x0a, 0x11, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, - 0x75, 0x6e, 0x74, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, - 0x75, 0x6e, 0x74, 0x22, 0x1b, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x12, 0x95, 0x01, 0x0a, 0x0d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, - 0x65, 0x79, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, - 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x67, 0x6f, + 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x48, 0xda, 0x41, 0x0d, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, + 0x06, 0x62, 
0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x3d, 0x2a, 0x2a, 0x7d, 0x30, 0x01, 0x12, 0x8c, 0x01, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x39, 0xda, 0x41, 0x12, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, + 0x6b, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x12, 0x1c, 0x0a, 0x0d, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x60, 0x0a, 0x0b, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x12, 0x6e, 0x0a, 0x0f, 0x42, 0x69, 0x64, 0x69, 0x57, + 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x29, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, + 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x69, 0x64, 0x69, 0x57, 0x72, + 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x84, 0x01, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x98, + 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 
0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, + 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3a, 0x8a, + 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, 0x0f, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x21, 0x0a, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xae, 0x01, 0x0a, 0x13, 0x53, 0x74, + 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, + 0x65, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, + 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, + 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x38, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x32, 0x12, 0x30, 0x0a, 0x21, 0x77, 0x72, 0x69, 0x74, + 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xae, 0x01, 0x0a, 0x10, 0x51, + 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, + 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x31, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, - 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x1d, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x2c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, - 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x77, 0x0a, 0x0d, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, - 0x6c, 0x65, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x25, 0x8a, 0xd3, 0xe4, 0x93, - 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x11, - 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x2c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x12, 0x7d, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, - 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 
0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, - 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x25, 0x8a, 0xd3, 0xe4, 0x93, 0x02, - 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x11, 0x61, - 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x2c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x12, 0x7c, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, - 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, - 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x1b, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x9d, - 0x01, 0x0a, 0x0d, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, + 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x41, 0xda, 0x41, 0x09, 0x75, 0x70, 0x6c, + 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x12, 0x2d, 0x0a, 0x09, + 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0x12, 0x80, 0x01, 0x0a, 0x11, + 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x22, 0x1b, 0xda, 0x41, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x8a, 0xd3, 0xe4, + 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x95, + 0x01, 0x0a, 0x0d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, - 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, - 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x3f, 0x8a, - 0xd3, 0xe4, 0x93, 0x02, 0x22, 0x12, 0x20, 0x0a, 0x10, 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, - 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x0c, 0x7b, 0x70, 0x72, 
0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x14, 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, - 0x65, 0x79, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x1a, 0xa7, - 0x02, 0xca, 0x41, 0x16, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x8a, 0x02, 0x68, 0x74, - 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, - 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, - 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, - 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, - 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x72, 0x65, 0x61, 0x64, - 0x2d, 0x6f, 0x6e, 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, - 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, - 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2c, 0x68, 0x74, - 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, - 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, - 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, - 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x72, 0x65, - 0x61, 0x64, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x42, 0xdc, 0x01, 0x0a, 0x15, 0x63, 0x6f, 0x6d, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, + 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x31, 0xda, 0x41, 0x1d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2c, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, + 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x77, 0x0a, 0x0d, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x25, 0xda, 0x41, 0x11, 0x61, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x2c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x8a, 0xd3, + 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, + 0x7d, 0x0a, 
0x0a, 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x24, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x25, 0xda, 0x41, 0x11, 0x61, 0x63, 0x63, 0x65, + 0x73, 0x73, 0x5f, 0x69, 0x64, 0x2c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x8a, 0xd3, 0xe4, + 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x7c, + 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x26, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x48, + 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x1b, 0xda, 0x41, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, + 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x9d, 0x01, 0x0a, + 0x0d, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x76, 0x32, 0x42, 0x0c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x50, 0x01, 0x5a, 0x38, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, - 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2f, 0x76, 0x32, 0x3b, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0xea, 0x41, 0x78, 0x0a, - 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, - 0x65, 0x79, 0x12, 0x53, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52, - 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x7d, 0x2f, - 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, - 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x7d, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, + 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x3f, 0xda, 0x41, 0x14, + 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, + 0x6d, 0x61, 0x73, 0x6b, 0x8a, 0xd3, 0xe4, 
0x93, 0x02, 0x22, 0x12, 0x20, 0x0a, 0x10, 0x68, 0x6d, + 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x0c, + 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x1a, 0xa7, 0x02, 0xca, + 0x41, 0x16, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x8a, 0x02, 0x68, 0x74, 0x74, 0x70, + 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, + 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x2d, 0x6f, + 0x6e, 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, + 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x66, + 0x75, 0x6c, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2c, 0x68, 0x74, 0x74, 0x70, + 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, + 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, + 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x72, 0x65, 0x61, 0x64, + 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x42, 0xe2, 0x01, 0xea, 0x41, 0x78, 0x0a, 0x21, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x12, + 0x53, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, + 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x7d, 0x2f, 0x63, 0x72, 0x79, + 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, + 0x6b, 0x65, 0x79, 0x7d, 0x0a, 0x15, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x42, 0x0c, 0x53, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3e, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, + 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x70, + 0x62, 0x3b, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, } var ( @@ -8189,7 +8843,7 @@ func 
file_google_storage_v2_storage_proto_rawDescGZIP() []byte { } var file_google_storage_v2_storage_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_google_storage_v2_storage_proto_msgTypes = make([]protoimpl.MessageInfo, 74) +var file_google_storage_v2_storage_proto_msgTypes = make([]protoimpl.MessageInfo, 78) var file_google_storage_v2_storage_proto_goTypes = []interface{}{ (ServiceConstants_Values)(0), // 0: google.storage.v2.ServiceConstants.Values (*DeleteBucketRequest)(nil), // 1: google.storage.v2.DeleteBucketRequest @@ -8206,233 +8860,251 @@ var file_google_storage_v2_storage_proto_goTypes = []interface{}{ (*ListNotificationConfigsResponse)(nil), // 12: google.storage.v2.ListNotificationConfigsResponse (*ComposeObjectRequest)(nil), // 13: google.storage.v2.ComposeObjectRequest (*DeleteObjectRequest)(nil), // 14: google.storage.v2.DeleteObjectRequest - (*CancelResumableWriteRequest)(nil), // 15: google.storage.v2.CancelResumableWriteRequest - (*CancelResumableWriteResponse)(nil), // 16: google.storage.v2.CancelResumableWriteResponse - (*ReadObjectRequest)(nil), // 17: google.storage.v2.ReadObjectRequest - (*GetObjectRequest)(nil), // 18: google.storage.v2.GetObjectRequest - (*ReadObjectResponse)(nil), // 19: google.storage.v2.ReadObjectResponse - (*WriteObjectSpec)(nil), // 20: google.storage.v2.WriteObjectSpec - (*WriteObjectRequest)(nil), // 21: google.storage.v2.WriteObjectRequest - (*WriteObjectResponse)(nil), // 22: google.storage.v2.WriteObjectResponse - (*ListObjectsRequest)(nil), // 23: google.storage.v2.ListObjectsRequest - (*QueryWriteStatusRequest)(nil), // 24: google.storage.v2.QueryWriteStatusRequest - (*QueryWriteStatusResponse)(nil), // 25: google.storage.v2.QueryWriteStatusResponse - (*RewriteObjectRequest)(nil), // 26: google.storage.v2.RewriteObjectRequest - (*RewriteResponse)(nil), // 27: google.storage.v2.RewriteResponse - (*StartResumableWriteRequest)(nil), // 28: google.storage.v2.StartResumableWriteRequest - (*StartResumableWriteResponse)(nil), // 29: google.storage.v2.StartResumableWriteResponse - (*UpdateObjectRequest)(nil), // 30: google.storage.v2.UpdateObjectRequest - (*GetServiceAccountRequest)(nil), // 31: google.storage.v2.GetServiceAccountRequest - (*CreateHmacKeyRequest)(nil), // 32: google.storage.v2.CreateHmacKeyRequest - (*CreateHmacKeyResponse)(nil), // 33: google.storage.v2.CreateHmacKeyResponse - (*DeleteHmacKeyRequest)(nil), // 34: google.storage.v2.DeleteHmacKeyRequest - (*GetHmacKeyRequest)(nil), // 35: google.storage.v2.GetHmacKeyRequest - (*ListHmacKeysRequest)(nil), // 36: google.storage.v2.ListHmacKeysRequest - (*ListHmacKeysResponse)(nil), // 37: google.storage.v2.ListHmacKeysResponse - (*UpdateHmacKeyRequest)(nil), // 38: google.storage.v2.UpdateHmacKeyRequest - (*CommonObjectRequestParams)(nil), // 39: google.storage.v2.CommonObjectRequestParams - (*ServiceConstants)(nil), // 40: google.storage.v2.ServiceConstants - (*Bucket)(nil), // 41: google.storage.v2.Bucket - (*BucketAccessControl)(nil), // 42: google.storage.v2.BucketAccessControl - (*ChecksummedData)(nil), // 43: google.storage.v2.ChecksummedData - (*ObjectChecksums)(nil), // 44: google.storage.v2.ObjectChecksums - (*HmacKeyMetadata)(nil), // 45: google.storage.v2.HmacKeyMetadata - (*NotificationConfig)(nil), // 46: google.storage.v2.NotificationConfig - (*CustomerEncryption)(nil), // 47: google.storage.v2.CustomerEncryption - (*Object)(nil), // 48: google.storage.v2.Object - (*ObjectAccessControl)(nil), // 49: google.storage.v2.ObjectAccessControl - 
(*ListObjectsResponse)(nil), // 50: google.storage.v2.ListObjectsResponse - (*ProjectTeam)(nil), // 51: google.storage.v2.ProjectTeam - (*ServiceAccount)(nil), // 52: google.storage.v2.ServiceAccount - (*Owner)(nil), // 53: google.storage.v2.Owner - (*ContentRange)(nil), // 54: google.storage.v2.ContentRange - (*ComposeObjectRequest_SourceObject)(nil), // 55: google.storage.v2.ComposeObjectRequest.SourceObject - (*ComposeObjectRequest_SourceObject_ObjectPreconditions)(nil), // 56: google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions - (*Bucket_Billing)(nil), // 57: google.storage.v2.Bucket.Billing - (*Bucket_Cors)(nil), // 58: google.storage.v2.Bucket.Cors - (*Bucket_Encryption)(nil), // 59: google.storage.v2.Bucket.Encryption - (*Bucket_IamConfig)(nil), // 60: google.storage.v2.Bucket.IamConfig - (*Bucket_Lifecycle)(nil), // 61: google.storage.v2.Bucket.Lifecycle - (*Bucket_Logging)(nil), // 62: google.storage.v2.Bucket.Logging - (*Bucket_RetentionPolicy)(nil), // 63: google.storage.v2.Bucket.RetentionPolicy - (*Bucket_Versioning)(nil), // 64: google.storage.v2.Bucket.Versioning - (*Bucket_Website)(nil), // 65: google.storage.v2.Bucket.Website - (*Bucket_CustomPlacementConfig)(nil), // 66: google.storage.v2.Bucket.CustomPlacementConfig - (*Bucket_Autoclass)(nil), // 67: google.storage.v2.Bucket.Autoclass - nil, // 68: google.storage.v2.Bucket.LabelsEntry - (*Bucket_IamConfig_UniformBucketLevelAccess)(nil), // 69: google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess - (*Bucket_Lifecycle_Rule)(nil), // 70: google.storage.v2.Bucket.Lifecycle.Rule - (*Bucket_Lifecycle_Rule_Action)(nil), // 71: google.storage.v2.Bucket.Lifecycle.Rule.Action - (*Bucket_Lifecycle_Rule_Condition)(nil), // 72: google.storage.v2.Bucket.Lifecycle.Rule.Condition - nil, // 73: google.storage.v2.NotificationConfig.CustomAttributesEntry - nil, // 74: google.storage.v2.Object.MetadataEntry - (*fieldmaskpb.FieldMask)(nil), // 75: google.protobuf.FieldMask - (*timestamppb.Timestamp)(nil), // 76: google.protobuf.Timestamp - (*durationpb.Duration)(nil), // 77: google.protobuf.Duration - (*date.Date)(nil), // 78: google.type.Date - (*iampb.GetIamPolicyRequest)(nil), // 79: google.iam.v1.GetIamPolicyRequest - (*iampb.SetIamPolicyRequest)(nil), // 80: google.iam.v1.SetIamPolicyRequest - (*iampb.TestIamPermissionsRequest)(nil), // 81: google.iam.v1.TestIamPermissionsRequest - (*emptypb.Empty)(nil), // 82: google.protobuf.Empty - (*iampb.Policy)(nil), // 83: google.iam.v1.Policy - (*iampb.TestIamPermissionsResponse)(nil), // 84: google.iam.v1.TestIamPermissionsResponse + (*RestoreObjectRequest)(nil), // 15: google.storage.v2.RestoreObjectRequest + (*CancelResumableWriteRequest)(nil), // 16: google.storage.v2.CancelResumableWriteRequest + (*CancelResumableWriteResponse)(nil), // 17: google.storage.v2.CancelResumableWriteResponse + (*ReadObjectRequest)(nil), // 18: google.storage.v2.ReadObjectRequest + (*GetObjectRequest)(nil), // 19: google.storage.v2.GetObjectRequest + (*ReadObjectResponse)(nil), // 20: google.storage.v2.ReadObjectResponse + (*WriteObjectSpec)(nil), // 21: google.storage.v2.WriteObjectSpec + (*WriteObjectRequest)(nil), // 22: google.storage.v2.WriteObjectRequest + (*WriteObjectResponse)(nil), // 23: google.storage.v2.WriteObjectResponse + (*BidiWriteObjectRequest)(nil), // 24: google.storage.v2.BidiWriteObjectRequest + (*BidiWriteObjectResponse)(nil), // 25: google.storage.v2.BidiWriteObjectResponse + (*ListObjectsRequest)(nil), // 26: google.storage.v2.ListObjectsRequest + 
(*QueryWriteStatusRequest)(nil), // 27: google.storage.v2.QueryWriteStatusRequest + (*QueryWriteStatusResponse)(nil), // 28: google.storage.v2.QueryWriteStatusResponse + (*RewriteObjectRequest)(nil), // 29: google.storage.v2.RewriteObjectRequest + (*RewriteResponse)(nil), // 30: google.storage.v2.RewriteResponse + (*StartResumableWriteRequest)(nil), // 31: google.storage.v2.StartResumableWriteRequest + (*StartResumableWriteResponse)(nil), // 32: google.storage.v2.StartResumableWriteResponse + (*UpdateObjectRequest)(nil), // 33: google.storage.v2.UpdateObjectRequest + (*GetServiceAccountRequest)(nil), // 34: google.storage.v2.GetServiceAccountRequest + (*CreateHmacKeyRequest)(nil), // 35: google.storage.v2.CreateHmacKeyRequest + (*CreateHmacKeyResponse)(nil), // 36: google.storage.v2.CreateHmacKeyResponse + (*DeleteHmacKeyRequest)(nil), // 37: google.storage.v2.DeleteHmacKeyRequest + (*GetHmacKeyRequest)(nil), // 38: google.storage.v2.GetHmacKeyRequest + (*ListHmacKeysRequest)(nil), // 39: google.storage.v2.ListHmacKeysRequest + (*ListHmacKeysResponse)(nil), // 40: google.storage.v2.ListHmacKeysResponse + (*UpdateHmacKeyRequest)(nil), // 41: google.storage.v2.UpdateHmacKeyRequest + (*CommonObjectRequestParams)(nil), // 42: google.storage.v2.CommonObjectRequestParams + (*ServiceConstants)(nil), // 43: google.storage.v2.ServiceConstants + (*Bucket)(nil), // 44: google.storage.v2.Bucket + (*BucketAccessControl)(nil), // 45: google.storage.v2.BucketAccessControl + (*ChecksummedData)(nil), // 46: google.storage.v2.ChecksummedData + (*ObjectChecksums)(nil), // 47: google.storage.v2.ObjectChecksums + (*HmacKeyMetadata)(nil), // 48: google.storage.v2.HmacKeyMetadata + (*NotificationConfig)(nil), // 49: google.storage.v2.NotificationConfig + (*CustomerEncryption)(nil), // 50: google.storage.v2.CustomerEncryption + (*Object)(nil), // 51: google.storage.v2.Object + (*ObjectAccessControl)(nil), // 52: google.storage.v2.ObjectAccessControl + (*ListObjectsResponse)(nil), // 53: google.storage.v2.ListObjectsResponse + (*ProjectTeam)(nil), // 54: google.storage.v2.ProjectTeam + (*ServiceAccount)(nil), // 55: google.storage.v2.ServiceAccount + (*Owner)(nil), // 56: google.storage.v2.Owner + (*ContentRange)(nil), // 57: google.storage.v2.ContentRange + (*ComposeObjectRequest_SourceObject)(nil), // 58: google.storage.v2.ComposeObjectRequest.SourceObject + (*ComposeObjectRequest_SourceObject_ObjectPreconditions)(nil), // 59: google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions + (*Bucket_Billing)(nil), // 60: google.storage.v2.Bucket.Billing + (*Bucket_Cors)(nil), // 61: google.storage.v2.Bucket.Cors + (*Bucket_Encryption)(nil), // 62: google.storage.v2.Bucket.Encryption + (*Bucket_IamConfig)(nil), // 63: google.storage.v2.Bucket.IamConfig + (*Bucket_Lifecycle)(nil), // 64: google.storage.v2.Bucket.Lifecycle + (*Bucket_Logging)(nil), // 65: google.storage.v2.Bucket.Logging + (*Bucket_RetentionPolicy)(nil), // 66: google.storage.v2.Bucket.RetentionPolicy + (*Bucket_SoftDeletePolicy)(nil), // 67: google.storage.v2.Bucket.SoftDeletePolicy + (*Bucket_Versioning)(nil), // 68: google.storage.v2.Bucket.Versioning + (*Bucket_Website)(nil), // 69: google.storage.v2.Bucket.Website + (*Bucket_CustomPlacementConfig)(nil), // 70: google.storage.v2.Bucket.CustomPlacementConfig + (*Bucket_Autoclass)(nil), // 71: google.storage.v2.Bucket.Autoclass + nil, // 72: google.storage.v2.Bucket.LabelsEntry + (*Bucket_IamConfig_UniformBucketLevelAccess)(nil), // 73: 
google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess + (*Bucket_Lifecycle_Rule)(nil), // 74: google.storage.v2.Bucket.Lifecycle.Rule + (*Bucket_Lifecycle_Rule_Action)(nil), // 75: google.storage.v2.Bucket.Lifecycle.Rule.Action + (*Bucket_Lifecycle_Rule_Condition)(nil), // 76: google.storage.v2.Bucket.Lifecycle.Rule.Condition + nil, // 77: google.storage.v2.NotificationConfig.CustomAttributesEntry + nil, // 78: google.storage.v2.Object.MetadataEntry + (*fieldmaskpb.FieldMask)(nil), // 79: google.protobuf.FieldMask + (*timestamppb.Timestamp)(nil), // 80: google.protobuf.Timestamp + (*durationpb.Duration)(nil), // 81: google.protobuf.Duration + (*date.Date)(nil), // 82: google.type.Date + (*iampb.GetIamPolicyRequest)(nil), // 83: google.iam.v1.GetIamPolicyRequest + (*iampb.SetIamPolicyRequest)(nil), // 84: google.iam.v1.SetIamPolicyRequest + (*iampb.TestIamPermissionsRequest)(nil), // 85: google.iam.v1.TestIamPermissionsRequest + (*emptypb.Empty)(nil), // 86: google.protobuf.Empty + (*iampb.Policy)(nil), // 87: google.iam.v1.Policy + (*iampb.TestIamPermissionsResponse)(nil), // 88: google.iam.v1.TestIamPermissionsResponse } var file_google_storage_v2_storage_proto_depIdxs = []int32{ - 75, // 0: google.storage.v2.GetBucketRequest.read_mask:type_name -> google.protobuf.FieldMask - 41, // 1: google.storage.v2.CreateBucketRequest.bucket:type_name -> google.storage.v2.Bucket - 75, // 2: google.storage.v2.ListBucketsRequest.read_mask:type_name -> google.protobuf.FieldMask - 41, // 3: google.storage.v2.ListBucketsResponse.buckets:type_name -> google.storage.v2.Bucket - 41, // 4: google.storage.v2.UpdateBucketRequest.bucket:type_name -> google.storage.v2.Bucket - 75, // 5: google.storage.v2.UpdateBucketRequest.update_mask:type_name -> google.protobuf.FieldMask - 46, // 6: google.storage.v2.CreateNotificationConfigRequest.notification_config:type_name -> google.storage.v2.NotificationConfig - 46, // 7: google.storage.v2.ListNotificationConfigsResponse.notification_configs:type_name -> google.storage.v2.NotificationConfig - 48, // 8: google.storage.v2.ComposeObjectRequest.destination:type_name -> google.storage.v2.Object - 55, // 9: google.storage.v2.ComposeObjectRequest.source_objects:type_name -> google.storage.v2.ComposeObjectRequest.SourceObject - 39, // 10: google.storage.v2.ComposeObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 44, // 11: google.storage.v2.ComposeObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums - 39, // 12: google.storage.v2.DeleteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 39, // 13: google.storage.v2.ReadObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 75, // 14: google.storage.v2.ReadObjectRequest.read_mask:type_name -> google.protobuf.FieldMask - 39, // 15: google.storage.v2.GetObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 75, // 16: google.storage.v2.GetObjectRequest.read_mask:type_name -> google.protobuf.FieldMask - 43, // 17: google.storage.v2.ReadObjectResponse.checksummed_data:type_name -> google.storage.v2.ChecksummedData - 44, // 18: google.storage.v2.ReadObjectResponse.object_checksums:type_name -> google.storage.v2.ObjectChecksums - 54, // 19: google.storage.v2.ReadObjectResponse.content_range:type_name -> google.storage.v2.ContentRange - 48, // 20: google.storage.v2.ReadObjectResponse.metadata:type_name -> 
google.storage.v2.Object - 48, // 21: google.storage.v2.WriteObjectSpec.resource:type_name -> google.storage.v2.Object - 20, // 22: google.storage.v2.WriteObjectRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec - 43, // 23: google.storage.v2.WriteObjectRequest.checksummed_data:type_name -> google.storage.v2.ChecksummedData - 44, // 24: google.storage.v2.WriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums - 39, // 25: google.storage.v2.WriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 48, // 26: google.storage.v2.WriteObjectResponse.resource:type_name -> google.storage.v2.Object - 75, // 27: google.storage.v2.ListObjectsRequest.read_mask:type_name -> google.protobuf.FieldMask - 39, // 28: google.storage.v2.QueryWriteStatusRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 48, // 29: google.storage.v2.QueryWriteStatusResponse.resource:type_name -> google.storage.v2.Object - 48, // 30: google.storage.v2.RewriteObjectRequest.destination:type_name -> google.storage.v2.Object - 39, // 31: google.storage.v2.RewriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 44, // 32: google.storage.v2.RewriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums - 48, // 33: google.storage.v2.RewriteResponse.resource:type_name -> google.storage.v2.Object - 20, // 34: google.storage.v2.StartResumableWriteRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec - 39, // 35: google.storage.v2.StartResumableWriteRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 44, // 36: google.storage.v2.StartResumableWriteRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums - 48, // 37: google.storage.v2.UpdateObjectRequest.object:type_name -> google.storage.v2.Object - 75, // 38: google.storage.v2.UpdateObjectRequest.update_mask:type_name -> google.protobuf.FieldMask - 39, // 39: google.storage.v2.UpdateObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 45, // 40: google.storage.v2.CreateHmacKeyResponse.metadata:type_name -> google.storage.v2.HmacKeyMetadata - 45, // 41: google.storage.v2.ListHmacKeysResponse.hmac_keys:type_name -> google.storage.v2.HmacKeyMetadata - 45, // 42: google.storage.v2.UpdateHmacKeyRequest.hmac_key:type_name -> google.storage.v2.HmacKeyMetadata - 75, // 43: google.storage.v2.UpdateHmacKeyRequest.update_mask:type_name -> google.protobuf.FieldMask - 42, // 44: google.storage.v2.Bucket.acl:type_name -> google.storage.v2.BucketAccessControl - 49, // 45: google.storage.v2.Bucket.default_object_acl:type_name -> google.storage.v2.ObjectAccessControl - 61, // 46: google.storage.v2.Bucket.lifecycle:type_name -> google.storage.v2.Bucket.Lifecycle - 76, // 47: google.storage.v2.Bucket.create_time:type_name -> google.protobuf.Timestamp - 58, // 48: google.storage.v2.Bucket.cors:type_name -> google.storage.v2.Bucket.Cors - 76, // 49: google.storage.v2.Bucket.update_time:type_name -> google.protobuf.Timestamp - 68, // 50: google.storage.v2.Bucket.labels:type_name -> google.storage.v2.Bucket.LabelsEntry - 65, // 51: google.storage.v2.Bucket.website:type_name -> google.storage.v2.Bucket.Website - 64, // 52: google.storage.v2.Bucket.versioning:type_name -> google.storage.v2.Bucket.Versioning - 62, // 53: google.storage.v2.Bucket.logging:type_name -> 
google.storage.v2.Bucket.Logging - 53, // 54: google.storage.v2.Bucket.owner:type_name -> google.storage.v2.Owner - 59, // 55: google.storage.v2.Bucket.encryption:type_name -> google.storage.v2.Bucket.Encryption - 57, // 56: google.storage.v2.Bucket.billing:type_name -> google.storage.v2.Bucket.Billing - 63, // 57: google.storage.v2.Bucket.retention_policy:type_name -> google.storage.v2.Bucket.RetentionPolicy - 60, // 58: google.storage.v2.Bucket.iam_config:type_name -> google.storage.v2.Bucket.IamConfig - 66, // 59: google.storage.v2.Bucket.custom_placement_config:type_name -> google.storage.v2.Bucket.CustomPlacementConfig - 67, // 60: google.storage.v2.Bucket.autoclass:type_name -> google.storage.v2.Bucket.Autoclass - 51, // 61: google.storage.v2.BucketAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam - 76, // 62: google.storage.v2.HmacKeyMetadata.create_time:type_name -> google.protobuf.Timestamp - 76, // 63: google.storage.v2.HmacKeyMetadata.update_time:type_name -> google.protobuf.Timestamp - 73, // 64: google.storage.v2.NotificationConfig.custom_attributes:type_name -> google.storage.v2.NotificationConfig.CustomAttributesEntry - 49, // 65: google.storage.v2.Object.acl:type_name -> google.storage.v2.ObjectAccessControl - 76, // 66: google.storage.v2.Object.delete_time:type_name -> google.protobuf.Timestamp - 76, // 67: google.storage.v2.Object.create_time:type_name -> google.protobuf.Timestamp - 44, // 68: google.storage.v2.Object.checksums:type_name -> google.storage.v2.ObjectChecksums - 76, // 69: google.storage.v2.Object.update_time:type_name -> google.protobuf.Timestamp - 76, // 70: google.storage.v2.Object.update_storage_class_time:type_name -> google.protobuf.Timestamp - 76, // 71: google.storage.v2.Object.retention_expire_time:type_name -> google.protobuf.Timestamp - 74, // 72: google.storage.v2.Object.metadata:type_name -> google.storage.v2.Object.MetadataEntry - 53, // 73: google.storage.v2.Object.owner:type_name -> google.storage.v2.Owner - 47, // 74: google.storage.v2.Object.customer_encryption:type_name -> google.storage.v2.CustomerEncryption - 76, // 75: google.storage.v2.Object.custom_time:type_name -> google.protobuf.Timestamp - 51, // 76: google.storage.v2.ObjectAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam - 48, // 77: google.storage.v2.ListObjectsResponse.objects:type_name -> google.storage.v2.Object - 56, // 78: google.storage.v2.ComposeObjectRequest.SourceObject.object_preconditions:type_name -> google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions - 69, // 79: google.storage.v2.Bucket.IamConfig.uniform_bucket_level_access:type_name -> google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess - 70, // 80: google.storage.v2.Bucket.Lifecycle.rule:type_name -> google.storage.v2.Bucket.Lifecycle.Rule - 76, // 81: google.storage.v2.Bucket.RetentionPolicy.effective_time:type_name -> google.protobuf.Timestamp - 77, // 82: google.storage.v2.Bucket.RetentionPolicy.retention_duration:type_name -> google.protobuf.Duration - 76, // 83: google.storage.v2.Bucket.Autoclass.toggle_time:type_name -> google.protobuf.Timestamp - 76, // 84: google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess.lock_time:type_name -> google.protobuf.Timestamp - 71, // 85: google.storage.v2.Bucket.Lifecycle.Rule.action:type_name -> google.storage.v2.Bucket.Lifecycle.Rule.Action - 72, // 86: google.storage.v2.Bucket.Lifecycle.Rule.condition:type_name -> google.storage.v2.Bucket.Lifecycle.Rule.Condition - 78, // 87: 
google.storage.v2.Bucket.Lifecycle.Rule.Condition.created_before:type_name -> google.type.Date - 78, // 88: google.storage.v2.Bucket.Lifecycle.Rule.Condition.custom_time_before:type_name -> google.type.Date - 78, // 89: google.storage.v2.Bucket.Lifecycle.Rule.Condition.noncurrent_time_before:type_name -> google.type.Date - 1, // 90: google.storage.v2.Storage.DeleteBucket:input_type -> google.storage.v2.DeleteBucketRequest - 2, // 91: google.storage.v2.Storage.GetBucket:input_type -> google.storage.v2.GetBucketRequest - 3, // 92: google.storage.v2.Storage.CreateBucket:input_type -> google.storage.v2.CreateBucketRequest - 4, // 93: google.storage.v2.Storage.ListBuckets:input_type -> google.storage.v2.ListBucketsRequest - 6, // 94: google.storage.v2.Storage.LockBucketRetentionPolicy:input_type -> google.storage.v2.LockBucketRetentionPolicyRequest - 79, // 95: google.storage.v2.Storage.GetIamPolicy:input_type -> google.iam.v1.GetIamPolicyRequest - 80, // 96: google.storage.v2.Storage.SetIamPolicy:input_type -> google.iam.v1.SetIamPolicyRequest - 81, // 97: google.storage.v2.Storage.TestIamPermissions:input_type -> google.iam.v1.TestIamPermissionsRequest - 7, // 98: google.storage.v2.Storage.UpdateBucket:input_type -> google.storage.v2.UpdateBucketRequest - 8, // 99: google.storage.v2.Storage.DeleteNotificationConfig:input_type -> google.storage.v2.DeleteNotificationConfigRequest - 9, // 100: google.storage.v2.Storage.GetNotificationConfig:input_type -> google.storage.v2.GetNotificationConfigRequest - 10, // 101: google.storage.v2.Storage.CreateNotificationConfig:input_type -> google.storage.v2.CreateNotificationConfigRequest - 11, // 102: google.storage.v2.Storage.ListNotificationConfigs:input_type -> google.storage.v2.ListNotificationConfigsRequest - 13, // 103: google.storage.v2.Storage.ComposeObject:input_type -> google.storage.v2.ComposeObjectRequest - 14, // 104: google.storage.v2.Storage.DeleteObject:input_type -> google.storage.v2.DeleteObjectRequest - 15, // 105: google.storage.v2.Storage.CancelResumableWrite:input_type -> google.storage.v2.CancelResumableWriteRequest - 18, // 106: google.storage.v2.Storage.GetObject:input_type -> google.storage.v2.GetObjectRequest - 17, // 107: google.storage.v2.Storage.ReadObject:input_type -> google.storage.v2.ReadObjectRequest - 30, // 108: google.storage.v2.Storage.UpdateObject:input_type -> google.storage.v2.UpdateObjectRequest - 21, // 109: google.storage.v2.Storage.WriteObject:input_type -> google.storage.v2.WriteObjectRequest - 23, // 110: google.storage.v2.Storage.ListObjects:input_type -> google.storage.v2.ListObjectsRequest - 26, // 111: google.storage.v2.Storage.RewriteObject:input_type -> google.storage.v2.RewriteObjectRequest - 28, // 112: google.storage.v2.Storage.StartResumableWrite:input_type -> google.storage.v2.StartResumableWriteRequest - 24, // 113: google.storage.v2.Storage.QueryWriteStatus:input_type -> google.storage.v2.QueryWriteStatusRequest - 31, // 114: google.storage.v2.Storage.GetServiceAccount:input_type -> google.storage.v2.GetServiceAccountRequest - 32, // 115: google.storage.v2.Storage.CreateHmacKey:input_type -> google.storage.v2.CreateHmacKeyRequest - 34, // 116: google.storage.v2.Storage.DeleteHmacKey:input_type -> google.storage.v2.DeleteHmacKeyRequest - 35, // 117: google.storage.v2.Storage.GetHmacKey:input_type -> google.storage.v2.GetHmacKeyRequest - 36, // 118: google.storage.v2.Storage.ListHmacKeys:input_type -> google.storage.v2.ListHmacKeysRequest - 38, // 119: 
google.storage.v2.Storage.UpdateHmacKey:input_type -> google.storage.v2.UpdateHmacKeyRequest - 82, // 120: google.storage.v2.Storage.DeleteBucket:output_type -> google.protobuf.Empty - 41, // 121: google.storage.v2.Storage.GetBucket:output_type -> google.storage.v2.Bucket - 41, // 122: google.storage.v2.Storage.CreateBucket:output_type -> google.storage.v2.Bucket - 5, // 123: google.storage.v2.Storage.ListBuckets:output_type -> google.storage.v2.ListBucketsResponse - 41, // 124: google.storage.v2.Storage.LockBucketRetentionPolicy:output_type -> google.storage.v2.Bucket - 83, // 125: google.storage.v2.Storage.GetIamPolicy:output_type -> google.iam.v1.Policy - 83, // 126: google.storage.v2.Storage.SetIamPolicy:output_type -> google.iam.v1.Policy - 84, // 127: google.storage.v2.Storage.TestIamPermissions:output_type -> google.iam.v1.TestIamPermissionsResponse - 41, // 128: google.storage.v2.Storage.UpdateBucket:output_type -> google.storage.v2.Bucket - 82, // 129: google.storage.v2.Storage.DeleteNotificationConfig:output_type -> google.protobuf.Empty - 46, // 130: google.storage.v2.Storage.GetNotificationConfig:output_type -> google.storage.v2.NotificationConfig - 46, // 131: google.storage.v2.Storage.CreateNotificationConfig:output_type -> google.storage.v2.NotificationConfig - 12, // 132: google.storage.v2.Storage.ListNotificationConfigs:output_type -> google.storage.v2.ListNotificationConfigsResponse - 48, // 133: google.storage.v2.Storage.ComposeObject:output_type -> google.storage.v2.Object - 82, // 134: google.storage.v2.Storage.DeleteObject:output_type -> google.protobuf.Empty - 16, // 135: google.storage.v2.Storage.CancelResumableWrite:output_type -> google.storage.v2.CancelResumableWriteResponse - 48, // 136: google.storage.v2.Storage.GetObject:output_type -> google.storage.v2.Object - 19, // 137: google.storage.v2.Storage.ReadObject:output_type -> google.storage.v2.ReadObjectResponse - 48, // 138: google.storage.v2.Storage.UpdateObject:output_type -> google.storage.v2.Object - 22, // 139: google.storage.v2.Storage.WriteObject:output_type -> google.storage.v2.WriteObjectResponse - 50, // 140: google.storage.v2.Storage.ListObjects:output_type -> google.storage.v2.ListObjectsResponse - 27, // 141: google.storage.v2.Storage.RewriteObject:output_type -> google.storage.v2.RewriteResponse - 29, // 142: google.storage.v2.Storage.StartResumableWrite:output_type -> google.storage.v2.StartResumableWriteResponse - 25, // 143: google.storage.v2.Storage.QueryWriteStatus:output_type -> google.storage.v2.QueryWriteStatusResponse - 52, // 144: google.storage.v2.Storage.GetServiceAccount:output_type -> google.storage.v2.ServiceAccount - 33, // 145: google.storage.v2.Storage.CreateHmacKey:output_type -> google.storage.v2.CreateHmacKeyResponse - 82, // 146: google.storage.v2.Storage.DeleteHmacKey:output_type -> google.protobuf.Empty - 45, // 147: google.storage.v2.Storage.GetHmacKey:output_type -> google.storage.v2.HmacKeyMetadata - 37, // 148: google.storage.v2.Storage.ListHmacKeys:output_type -> google.storage.v2.ListHmacKeysResponse - 45, // 149: google.storage.v2.Storage.UpdateHmacKey:output_type -> google.storage.v2.HmacKeyMetadata - 120, // [120:150] is the sub-list for method output_type - 90, // [90:120] is the sub-list for method input_type - 90, // [90:90] is the sub-list for extension type_name - 90, // [90:90] is the sub-list for extension extendee - 0, // [0:90] is the sub-list for field type_name + 79, // 0: google.storage.v2.GetBucketRequest.read_mask:type_name -> 
google.protobuf.FieldMask + 44, // 1: google.storage.v2.CreateBucketRequest.bucket:type_name -> google.storage.v2.Bucket + 79, // 2: google.storage.v2.ListBucketsRequest.read_mask:type_name -> google.protobuf.FieldMask + 44, // 3: google.storage.v2.ListBucketsResponse.buckets:type_name -> google.storage.v2.Bucket + 44, // 4: google.storage.v2.UpdateBucketRequest.bucket:type_name -> google.storage.v2.Bucket + 79, // 5: google.storage.v2.UpdateBucketRequest.update_mask:type_name -> google.protobuf.FieldMask + 49, // 6: google.storage.v2.CreateNotificationConfigRequest.notification_config:type_name -> google.storage.v2.NotificationConfig + 49, // 7: google.storage.v2.ListNotificationConfigsResponse.notification_configs:type_name -> google.storage.v2.NotificationConfig + 51, // 8: google.storage.v2.ComposeObjectRequest.destination:type_name -> google.storage.v2.Object + 58, // 9: google.storage.v2.ComposeObjectRequest.source_objects:type_name -> google.storage.v2.ComposeObjectRequest.SourceObject + 42, // 10: google.storage.v2.ComposeObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 47, // 11: google.storage.v2.ComposeObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums + 42, // 12: google.storage.v2.DeleteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 42, // 13: google.storage.v2.RestoreObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 42, // 14: google.storage.v2.ReadObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 79, // 15: google.storage.v2.ReadObjectRequest.read_mask:type_name -> google.protobuf.FieldMask + 42, // 16: google.storage.v2.GetObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 79, // 17: google.storage.v2.GetObjectRequest.read_mask:type_name -> google.protobuf.FieldMask + 46, // 18: google.storage.v2.ReadObjectResponse.checksummed_data:type_name -> google.storage.v2.ChecksummedData + 47, // 19: google.storage.v2.ReadObjectResponse.object_checksums:type_name -> google.storage.v2.ObjectChecksums + 57, // 20: google.storage.v2.ReadObjectResponse.content_range:type_name -> google.storage.v2.ContentRange + 51, // 21: google.storage.v2.ReadObjectResponse.metadata:type_name -> google.storage.v2.Object + 51, // 22: google.storage.v2.WriteObjectSpec.resource:type_name -> google.storage.v2.Object + 21, // 23: google.storage.v2.WriteObjectRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec + 46, // 24: google.storage.v2.WriteObjectRequest.checksummed_data:type_name -> google.storage.v2.ChecksummedData + 47, // 25: google.storage.v2.WriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums + 42, // 26: google.storage.v2.WriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 51, // 27: google.storage.v2.WriteObjectResponse.resource:type_name -> google.storage.v2.Object + 21, // 28: google.storage.v2.BidiWriteObjectRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec + 46, // 29: google.storage.v2.BidiWriteObjectRequest.checksummed_data:type_name -> google.storage.v2.ChecksummedData + 47, // 30: google.storage.v2.BidiWriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums + 42, // 31: google.storage.v2.BidiWriteObjectRequest.common_object_request_params:type_name 
-> google.storage.v2.CommonObjectRequestParams + 51, // 32: google.storage.v2.BidiWriteObjectResponse.resource:type_name -> google.storage.v2.Object + 79, // 33: google.storage.v2.ListObjectsRequest.read_mask:type_name -> google.protobuf.FieldMask + 42, // 34: google.storage.v2.QueryWriteStatusRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 51, // 35: google.storage.v2.QueryWriteStatusResponse.resource:type_name -> google.storage.v2.Object + 51, // 36: google.storage.v2.RewriteObjectRequest.destination:type_name -> google.storage.v2.Object + 42, // 37: google.storage.v2.RewriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 47, // 38: google.storage.v2.RewriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums + 51, // 39: google.storage.v2.RewriteResponse.resource:type_name -> google.storage.v2.Object + 21, // 40: google.storage.v2.StartResumableWriteRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec + 42, // 41: google.storage.v2.StartResumableWriteRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 47, // 42: google.storage.v2.StartResumableWriteRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums + 51, // 43: google.storage.v2.UpdateObjectRequest.object:type_name -> google.storage.v2.Object + 79, // 44: google.storage.v2.UpdateObjectRequest.update_mask:type_name -> google.protobuf.FieldMask + 42, // 45: google.storage.v2.UpdateObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 48, // 46: google.storage.v2.CreateHmacKeyResponse.metadata:type_name -> google.storage.v2.HmacKeyMetadata + 48, // 47: google.storage.v2.ListHmacKeysResponse.hmac_keys:type_name -> google.storage.v2.HmacKeyMetadata + 48, // 48: google.storage.v2.UpdateHmacKeyRequest.hmac_key:type_name -> google.storage.v2.HmacKeyMetadata + 79, // 49: google.storage.v2.UpdateHmacKeyRequest.update_mask:type_name -> google.protobuf.FieldMask + 45, // 50: google.storage.v2.Bucket.acl:type_name -> google.storage.v2.BucketAccessControl + 52, // 51: google.storage.v2.Bucket.default_object_acl:type_name -> google.storage.v2.ObjectAccessControl + 64, // 52: google.storage.v2.Bucket.lifecycle:type_name -> google.storage.v2.Bucket.Lifecycle + 80, // 53: google.storage.v2.Bucket.create_time:type_name -> google.protobuf.Timestamp + 61, // 54: google.storage.v2.Bucket.cors:type_name -> google.storage.v2.Bucket.Cors + 80, // 55: google.storage.v2.Bucket.update_time:type_name -> google.protobuf.Timestamp + 72, // 56: google.storage.v2.Bucket.labels:type_name -> google.storage.v2.Bucket.LabelsEntry + 69, // 57: google.storage.v2.Bucket.website:type_name -> google.storage.v2.Bucket.Website + 68, // 58: google.storage.v2.Bucket.versioning:type_name -> google.storage.v2.Bucket.Versioning + 65, // 59: google.storage.v2.Bucket.logging:type_name -> google.storage.v2.Bucket.Logging + 56, // 60: google.storage.v2.Bucket.owner:type_name -> google.storage.v2.Owner + 62, // 61: google.storage.v2.Bucket.encryption:type_name -> google.storage.v2.Bucket.Encryption + 60, // 62: google.storage.v2.Bucket.billing:type_name -> google.storage.v2.Bucket.Billing + 66, // 63: google.storage.v2.Bucket.retention_policy:type_name -> google.storage.v2.Bucket.RetentionPolicy + 63, // 64: google.storage.v2.Bucket.iam_config:type_name -> google.storage.v2.Bucket.IamConfig + 70, // 65: 
google.storage.v2.Bucket.custom_placement_config:type_name -> google.storage.v2.Bucket.CustomPlacementConfig + 71, // 66: google.storage.v2.Bucket.autoclass:type_name -> google.storage.v2.Bucket.Autoclass + 67, // 67: google.storage.v2.Bucket.soft_delete_policy:type_name -> google.storage.v2.Bucket.SoftDeletePolicy + 54, // 68: google.storage.v2.BucketAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam + 80, // 69: google.storage.v2.HmacKeyMetadata.create_time:type_name -> google.protobuf.Timestamp + 80, // 70: google.storage.v2.HmacKeyMetadata.update_time:type_name -> google.protobuf.Timestamp + 77, // 71: google.storage.v2.NotificationConfig.custom_attributes:type_name -> google.storage.v2.NotificationConfig.CustomAttributesEntry + 52, // 72: google.storage.v2.Object.acl:type_name -> google.storage.v2.ObjectAccessControl + 80, // 73: google.storage.v2.Object.delete_time:type_name -> google.protobuf.Timestamp + 80, // 74: google.storage.v2.Object.create_time:type_name -> google.protobuf.Timestamp + 47, // 75: google.storage.v2.Object.checksums:type_name -> google.storage.v2.ObjectChecksums + 80, // 76: google.storage.v2.Object.update_time:type_name -> google.protobuf.Timestamp + 80, // 77: google.storage.v2.Object.update_storage_class_time:type_name -> google.protobuf.Timestamp + 80, // 78: google.storage.v2.Object.retention_expire_time:type_name -> google.protobuf.Timestamp + 78, // 79: google.storage.v2.Object.metadata:type_name -> google.storage.v2.Object.MetadataEntry + 56, // 80: google.storage.v2.Object.owner:type_name -> google.storage.v2.Owner + 50, // 81: google.storage.v2.Object.customer_encryption:type_name -> google.storage.v2.CustomerEncryption + 80, // 82: google.storage.v2.Object.custom_time:type_name -> google.protobuf.Timestamp + 54, // 83: google.storage.v2.ObjectAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam + 51, // 84: google.storage.v2.ListObjectsResponse.objects:type_name -> google.storage.v2.Object + 59, // 85: google.storage.v2.ComposeObjectRequest.SourceObject.object_preconditions:type_name -> google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions + 73, // 86: google.storage.v2.Bucket.IamConfig.uniform_bucket_level_access:type_name -> google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess + 74, // 87: google.storage.v2.Bucket.Lifecycle.rule:type_name -> google.storage.v2.Bucket.Lifecycle.Rule + 80, // 88: google.storage.v2.Bucket.RetentionPolicy.effective_time:type_name -> google.protobuf.Timestamp + 81, // 89: google.storage.v2.Bucket.RetentionPolicy.retention_duration:type_name -> google.protobuf.Duration + 81, // 90: google.storage.v2.Bucket.SoftDeletePolicy.retention_duration:type_name -> google.protobuf.Duration + 80, // 91: google.storage.v2.Bucket.SoftDeletePolicy.effective_time:type_name -> google.protobuf.Timestamp + 80, // 92: google.storage.v2.Bucket.Autoclass.toggle_time:type_name -> google.protobuf.Timestamp + 80, // 93: google.storage.v2.Bucket.Autoclass.terminal_storage_class_update_time:type_name -> google.protobuf.Timestamp + 80, // 94: google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess.lock_time:type_name -> google.protobuf.Timestamp + 75, // 95: google.storage.v2.Bucket.Lifecycle.Rule.action:type_name -> google.storage.v2.Bucket.Lifecycle.Rule.Action + 76, // 96: google.storage.v2.Bucket.Lifecycle.Rule.condition:type_name -> google.storage.v2.Bucket.Lifecycle.Rule.Condition + 82, // 97: google.storage.v2.Bucket.Lifecycle.Rule.Condition.created_before:type_name -> 
google.type.Date + 82, // 98: google.storage.v2.Bucket.Lifecycle.Rule.Condition.custom_time_before:type_name -> google.type.Date + 82, // 99: google.storage.v2.Bucket.Lifecycle.Rule.Condition.noncurrent_time_before:type_name -> google.type.Date + 1, // 100: google.storage.v2.Storage.DeleteBucket:input_type -> google.storage.v2.DeleteBucketRequest + 2, // 101: google.storage.v2.Storage.GetBucket:input_type -> google.storage.v2.GetBucketRequest + 3, // 102: google.storage.v2.Storage.CreateBucket:input_type -> google.storage.v2.CreateBucketRequest + 4, // 103: google.storage.v2.Storage.ListBuckets:input_type -> google.storage.v2.ListBucketsRequest + 6, // 104: google.storage.v2.Storage.LockBucketRetentionPolicy:input_type -> google.storage.v2.LockBucketRetentionPolicyRequest + 83, // 105: google.storage.v2.Storage.GetIamPolicy:input_type -> google.iam.v1.GetIamPolicyRequest + 84, // 106: google.storage.v2.Storage.SetIamPolicy:input_type -> google.iam.v1.SetIamPolicyRequest + 85, // 107: google.storage.v2.Storage.TestIamPermissions:input_type -> google.iam.v1.TestIamPermissionsRequest + 7, // 108: google.storage.v2.Storage.UpdateBucket:input_type -> google.storage.v2.UpdateBucketRequest + 8, // 109: google.storage.v2.Storage.DeleteNotificationConfig:input_type -> google.storage.v2.DeleteNotificationConfigRequest + 9, // 110: google.storage.v2.Storage.GetNotificationConfig:input_type -> google.storage.v2.GetNotificationConfigRequest + 10, // 111: google.storage.v2.Storage.CreateNotificationConfig:input_type -> google.storage.v2.CreateNotificationConfigRequest + 11, // 112: google.storage.v2.Storage.ListNotificationConfigs:input_type -> google.storage.v2.ListNotificationConfigsRequest + 13, // 113: google.storage.v2.Storage.ComposeObject:input_type -> google.storage.v2.ComposeObjectRequest + 14, // 114: google.storage.v2.Storage.DeleteObject:input_type -> google.storage.v2.DeleteObjectRequest + 15, // 115: google.storage.v2.Storage.RestoreObject:input_type -> google.storage.v2.RestoreObjectRequest + 16, // 116: google.storage.v2.Storage.CancelResumableWrite:input_type -> google.storage.v2.CancelResumableWriteRequest + 19, // 117: google.storage.v2.Storage.GetObject:input_type -> google.storage.v2.GetObjectRequest + 18, // 118: google.storage.v2.Storage.ReadObject:input_type -> google.storage.v2.ReadObjectRequest + 33, // 119: google.storage.v2.Storage.UpdateObject:input_type -> google.storage.v2.UpdateObjectRequest + 22, // 120: google.storage.v2.Storage.WriteObject:input_type -> google.storage.v2.WriteObjectRequest + 24, // 121: google.storage.v2.Storage.BidiWriteObject:input_type -> google.storage.v2.BidiWriteObjectRequest + 26, // 122: google.storage.v2.Storage.ListObjects:input_type -> google.storage.v2.ListObjectsRequest + 29, // 123: google.storage.v2.Storage.RewriteObject:input_type -> google.storage.v2.RewriteObjectRequest + 31, // 124: google.storage.v2.Storage.StartResumableWrite:input_type -> google.storage.v2.StartResumableWriteRequest + 27, // 125: google.storage.v2.Storage.QueryWriteStatus:input_type -> google.storage.v2.QueryWriteStatusRequest + 34, // 126: google.storage.v2.Storage.GetServiceAccount:input_type -> google.storage.v2.GetServiceAccountRequest + 35, // 127: google.storage.v2.Storage.CreateHmacKey:input_type -> google.storage.v2.CreateHmacKeyRequest + 37, // 128: google.storage.v2.Storage.DeleteHmacKey:input_type -> google.storage.v2.DeleteHmacKeyRequest + 38, // 129: google.storage.v2.Storage.GetHmacKey:input_type -> google.storage.v2.GetHmacKeyRequest + 39, // 130: 
google.storage.v2.Storage.ListHmacKeys:input_type -> google.storage.v2.ListHmacKeysRequest + 41, // 131: google.storage.v2.Storage.UpdateHmacKey:input_type -> google.storage.v2.UpdateHmacKeyRequest + 86, // 132: google.storage.v2.Storage.DeleteBucket:output_type -> google.protobuf.Empty + 44, // 133: google.storage.v2.Storage.GetBucket:output_type -> google.storage.v2.Bucket + 44, // 134: google.storage.v2.Storage.CreateBucket:output_type -> google.storage.v2.Bucket + 5, // 135: google.storage.v2.Storage.ListBuckets:output_type -> google.storage.v2.ListBucketsResponse + 44, // 136: google.storage.v2.Storage.LockBucketRetentionPolicy:output_type -> google.storage.v2.Bucket + 87, // 137: google.storage.v2.Storage.GetIamPolicy:output_type -> google.iam.v1.Policy + 87, // 138: google.storage.v2.Storage.SetIamPolicy:output_type -> google.iam.v1.Policy + 88, // 139: google.storage.v2.Storage.TestIamPermissions:output_type -> google.iam.v1.TestIamPermissionsResponse + 44, // 140: google.storage.v2.Storage.UpdateBucket:output_type -> google.storage.v2.Bucket + 86, // 141: google.storage.v2.Storage.DeleteNotificationConfig:output_type -> google.protobuf.Empty + 49, // 142: google.storage.v2.Storage.GetNotificationConfig:output_type -> google.storage.v2.NotificationConfig + 49, // 143: google.storage.v2.Storage.CreateNotificationConfig:output_type -> google.storage.v2.NotificationConfig + 12, // 144: google.storage.v2.Storage.ListNotificationConfigs:output_type -> google.storage.v2.ListNotificationConfigsResponse + 51, // 145: google.storage.v2.Storage.ComposeObject:output_type -> google.storage.v2.Object + 86, // 146: google.storage.v2.Storage.DeleteObject:output_type -> google.protobuf.Empty + 51, // 147: google.storage.v2.Storage.RestoreObject:output_type -> google.storage.v2.Object + 17, // 148: google.storage.v2.Storage.CancelResumableWrite:output_type -> google.storage.v2.CancelResumableWriteResponse + 51, // 149: google.storage.v2.Storage.GetObject:output_type -> google.storage.v2.Object + 20, // 150: google.storage.v2.Storage.ReadObject:output_type -> google.storage.v2.ReadObjectResponse + 51, // 151: google.storage.v2.Storage.UpdateObject:output_type -> google.storage.v2.Object + 23, // 152: google.storage.v2.Storage.WriteObject:output_type -> google.storage.v2.WriteObjectResponse + 25, // 153: google.storage.v2.Storage.BidiWriteObject:output_type -> google.storage.v2.BidiWriteObjectResponse + 53, // 154: google.storage.v2.Storage.ListObjects:output_type -> google.storage.v2.ListObjectsResponse + 30, // 155: google.storage.v2.Storage.RewriteObject:output_type -> google.storage.v2.RewriteResponse + 32, // 156: google.storage.v2.Storage.StartResumableWrite:output_type -> google.storage.v2.StartResumableWriteResponse + 28, // 157: google.storage.v2.Storage.QueryWriteStatus:output_type -> google.storage.v2.QueryWriteStatusResponse + 55, // 158: google.storage.v2.Storage.GetServiceAccount:output_type -> google.storage.v2.ServiceAccount + 36, // 159: google.storage.v2.Storage.CreateHmacKey:output_type -> google.storage.v2.CreateHmacKeyResponse + 86, // 160: google.storage.v2.Storage.DeleteHmacKey:output_type -> google.protobuf.Empty + 48, // 161: google.storage.v2.Storage.GetHmacKey:output_type -> google.storage.v2.HmacKeyMetadata + 40, // 162: google.storage.v2.Storage.ListHmacKeys:output_type -> google.storage.v2.ListHmacKeysResponse + 48, // 163: google.storage.v2.Storage.UpdateHmacKey:output_type -> google.storage.v2.HmacKeyMetadata + 132, // [132:164] is the sub-list for method output_type + 
100, // [100:132] is the sub-list for method input_type + 100, // [100:100] is the sub-list for extension type_name + 100, // [100:100] is the sub-list for extension extendee + 0, // [0:100] is the sub-list for field type_name } func init() { file_google_storage_v2_storage_proto_init() } @@ -8610,7 +9282,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CancelResumableWriteRequest); i { + switch v := v.(*RestoreObjectRequest); i { case 0: return &v.state case 1: @@ -8622,7 +9294,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CancelResumableWriteResponse); i { + switch v := v.(*CancelResumableWriteRequest); i { case 0: return &v.state case 1: @@ -8634,7 +9306,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReadObjectRequest); i { + switch v := v.(*CancelResumableWriteResponse); i { case 0: return &v.state case 1: @@ -8646,7 +9318,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetObjectRequest); i { + switch v := v.(*ReadObjectRequest); i { case 0: return &v.state case 1: @@ -8658,7 +9330,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReadObjectResponse); i { + switch v := v.(*GetObjectRequest); i { case 0: return &v.state case 1: @@ -8670,7 +9342,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*WriteObjectSpec); i { + switch v := v.(*ReadObjectResponse); i { case 0: return &v.state case 1: @@ -8682,7 +9354,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*WriteObjectRequest); i { + switch v := v.(*WriteObjectSpec); i { case 0: return &v.state case 1: @@ -8694,7 +9366,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*WriteObjectResponse); i { + switch v := v.(*WriteObjectRequest); i { case 0: return &v.state case 1: @@ -8706,7 +9378,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListObjectsRequest); i { + switch v := v.(*WriteObjectResponse); i { case 0: return &v.state case 1: @@ -8718,7 +9390,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*QueryWriteStatusRequest); i { + switch v := v.(*BidiWriteObjectRequest); i { case 0: return &v.state case 1: @@ -8730,7 +9402,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*QueryWriteStatusResponse); i { + switch v := 
v.(*BidiWriteObjectResponse); i { case 0: return &v.state case 1: @@ -8742,7 +9414,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RewriteObjectRequest); i { + switch v := v.(*ListObjectsRequest); i { case 0: return &v.state case 1: @@ -8754,7 +9426,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RewriteResponse); i { + switch v := v.(*QueryWriteStatusRequest); i { case 0: return &v.state case 1: @@ -8766,7 +9438,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StartResumableWriteRequest); i { + switch v := v.(*QueryWriteStatusResponse); i { case 0: return &v.state case 1: @@ -8778,7 +9450,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StartResumableWriteResponse); i { + switch v := v.(*RewriteObjectRequest); i { case 0: return &v.state case 1: @@ -8790,7 +9462,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateObjectRequest); i { + switch v := v.(*RewriteResponse); i { case 0: return &v.state case 1: @@ -8802,7 +9474,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetServiceAccountRequest); i { + switch v := v.(*StartResumableWriteRequest); i { case 0: return &v.state case 1: @@ -8814,7 +9486,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateHmacKeyRequest); i { + switch v := v.(*StartResumableWriteResponse); i { case 0: return &v.state case 1: @@ -8826,7 +9498,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateHmacKeyResponse); i { + switch v := v.(*UpdateObjectRequest); i { case 0: return &v.state case 1: @@ -8838,7 +9510,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteHmacKeyRequest); i { + switch v := v.(*GetServiceAccountRequest); i { case 0: return &v.state case 1: @@ -8850,7 +9522,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetHmacKeyRequest); i { + switch v := v.(*CreateHmacKeyRequest); i { case 0: return &v.state case 1: @@ -8862,7 +9534,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListHmacKeysRequest); i { + switch v := v.(*CreateHmacKeyResponse); i { case 0: return &v.state case 1: @@ -8874,7 +9546,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[36].Exporter = func(v 
interface{}, i int) interface{} { - switch v := v.(*ListHmacKeysResponse); i { + switch v := v.(*DeleteHmacKeyRequest); i { case 0: return &v.state case 1: @@ -8886,7 +9558,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateHmacKeyRequest); i { + switch v := v.(*GetHmacKeyRequest); i { case 0: return &v.state case 1: @@ -8898,7 +9570,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CommonObjectRequestParams); i { + switch v := v.(*ListHmacKeysRequest); i { case 0: return &v.state case 1: @@ -8910,7 +9582,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ServiceConstants); i { + switch v := v.(*ListHmacKeysResponse); i { case 0: return &v.state case 1: @@ -8922,7 +9594,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket); i { + switch v := v.(*UpdateHmacKeyRequest); i { case 0: return &v.state case 1: @@ -8934,7 +9606,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BucketAccessControl); i { + switch v := v.(*CommonObjectRequestParams); i { case 0: return &v.state case 1: @@ -8946,7 +9618,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ChecksummedData); i { + switch v := v.(*ServiceConstants); i { case 0: return &v.state case 1: @@ -8958,7 +9630,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ObjectChecksums); i { + switch v := v.(*Bucket); i { case 0: return &v.state case 1: @@ -8970,7 +9642,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HmacKeyMetadata); i { + switch v := v.(*BucketAccessControl); i { case 0: return &v.state case 1: @@ -8982,7 +9654,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NotificationConfig); i { + switch v := v.(*ChecksummedData); i { case 0: return &v.state case 1: @@ -8994,7 +9666,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CustomerEncryption); i { + switch v := v.(*ObjectChecksums); i { case 0: return &v.state case 1: @@ -9006,7 +9678,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Object); i { + switch v := v.(*HmacKeyMetadata); i { case 0: return &v.state case 1: @@ -9018,7 +9690,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[48].Exporter = func(v interface{}, i int) 
interface{} { - switch v := v.(*ObjectAccessControl); i { + switch v := v.(*NotificationConfig); i { case 0: return &v.state case 1: @@ -9030,7 +9702,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListObjectsResponse); i { + switch v := v.(*CustomerEncryption); i { case 0: return &v.state case 1: @@ -9042,7 +9714,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ProjectTeam); i { + switch v := v.(*Object); i { case 0: return &v.state case 1: @@ -9054,7 +9726,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ServiceAccount); i { + switch v := v.(*ObjectAccessControl); i { case 0: return &v.state case 1: @@ -9066,7 +9738,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Owner); i { + switch v := v.(*ListObjectsResponse); i { case 0: return &v.state case 1: @@ -9078,7 +9750,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ContentRange); i { + switch v := v.(*ProjectTeam); i { case 0: return &v.state case 1: @@ -9090,7 +9762,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ComposeObjectRequest_SourceObject); i { + switch v := v.(*ServiceAccount); i { case 0: return &v.state case 1: @@ -9102,7 +9774,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ComposeObjectRequest_SourceObject_ObjectPreconditions); i { + switch v := v.(*Owner); i { case 0: return &v.state case 1: @@ -9114,7 +9786,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_Billing); i { + switch v := v.(*ContentRange); i { case 0: return &v.state case 1: @@ -9126,7 +9798,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_Cors); i { + switch v := v.(*ComposeObjectRequest_SourceObject); i { case 0: return &v.state case 1: @@ -9138,7 +9810,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_Encryption); i { + switch v := v.(*ComposeObjectRequest_SourceObject_ObjectPreconditions); i { case 0: return &v.state case 1: @@ -9150,7 +9822,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_IamConfig); i { + switch v := v.(*Bucket_Billing); i { case 0: return &v.state case 1: @@ -9162,7 +9834,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[60].Exporter = 
func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_Lifecycle); i { + switch v := v.(*Bucket_Cors); i { case 0: return &v.state case 1: @@ -9174,7 +9846,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_Logging); i { + switch v := v.(*Bucket_Encryption); i { case 0: return &v.state case 1: @@ -9186,7 +9858,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_RetentionPolicy); i { + switch v := v.(*Bucket_IamConfig); i { case 0: return &v.state case 1: @@ -9198,7 +9870,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_Versioning); i { + switch v := v.(*Bucket_Lifecycle); i { case 0: return &v.state case 1: @@ -9210,7 +9882,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_Website); i { + switch v := v.(*Bucket_Logging); i { case 0: return &v.state case 1: @@ -9222,7 +9894,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_CustomPlacementConfig); i { + switch v := v.(*Bucket_RetentionPolicy); i { case 0: return &v.state case 1: @@ -9234,7 +9906,19 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_Autoclass); i { + switch v := v.(*Bucket_SoftDeletePolicy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bucket_Versioning); i { case 0: return &v.state case 1: @@ -9246,7 +9930,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_IamConfig_UniformBucketLevelAccess); i { + switch v := v.(*Bucket_Website); i { case 0: return &v.state case 1: @@ -9258,7 +9942,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_Lifecycle_Rule); i { + switch v := v.(*Bucket_CustomPlacementConfig); i { case 0: return &v.state case 1: @@ -9270,6 +9954,42 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[70].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bucket_Autoclass); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[72].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bucket_IamConfig_UniformBucketLevelAccess); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[73].Exporter = func(v 
interface{}, i int) interface{} { + switch v := v.(*Bucket_Lifecycle_Rule); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[74].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Bucket_Lifecycle_Rule_Action); i { case 0: return &v.state @@ -9281,7 +10001,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[71].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[75].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Bucket_Lifecycle_Rule_Condition); i { case 0: return &v.state @@ -9300,38 +10020,49 @@ func file_google_storage_v2_storage_proto_init() { file_google_storage_v2_storage_proto_msgTypes[6].OneofWrappers = []interface{}{} file_google_storage_v2_storage_proto_msgTypes[12].OneofWrappers = []interface{}{} file_google_storage_v2_storage_proto_msgTypes[13].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[16].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[14].OneofWrappers = []interface{}{} file_google_storage_v2_storage_proto_msgTypes[17].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[19].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[20].OneofWrappers = []interface{}{ + file_google_storage_v2_storage_proto_msgTypes[18].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[20].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[21].OneofWrappers = []interface{}{ (*WriteObjectRequest_UploadId)(nil), (*WriteObjectRequest_WriteObjectSpec)(nil), (*WriteObjectRequest_ChecksummedData)(nil), } - file_google_storage_v2_storage_proto_msgTypes[21].OneofWrappers = []interface{}{ + file_google_storage_v2_storage_proto_msgTypes[22].OneofWrappers = []interface{}{ (*WriteObjectResponse_PersistedSize)(nil), (*WriteObjectResponse_Resource)(nil), } - file_google_storage_v2_storage_proto_msgTypes[22].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[23].OneofWrappers = []interface{}{ + (*BidiWriteObjectRequest_UploadId)(nil), + (*BidiWriteObjectRequest_WriteObjectSpec)(nil), + (*BidiWriteObjectRequest_ChecksummedData)(nil), + } file_google_storage_v2_storage_proto_msgTypes[24].OneofWrappers = []interface{}{ + (*BidiWriteObjectResponse_PersistedSize)(nil), + (*BidiWriteObjectResponse_Resource)(nil), + } + file_google_storage_v2_storage_proto_msgTypes[25].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[27].OneofWrappers = []interface{}{ (*QueryWriteStatusResponse_PersistedSize)(nil), (*QueryWriteStatusResponse_Resource)(nil), } - file_google_storage_v2_storage_proto_msgTypes[25].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[29].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[42].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[43].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[47].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[55].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[62].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[71].OneofWrappers = []interface{}{} + 
file_google_storage_v2_storage_proto_msgTypes[28].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[32].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[45].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[46].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[50].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[58].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[66].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[70].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[75].OneofWrappers = []interface{}{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_storage_v2_storage_proto_rawDesc, NumEnums: 1, - NumMessages: 74, + NumMessages: 78, NumExtensions: 0, NumServices: 1, }, @@ -9370,19 +10101,19 @@ type StorageClient interface { LockBucketRetentionPolicy(ctx context.Context, in *LockBucketRetentionPolicyRequest, opts ...grpc.CallOption) (*Bucket, error) // Gets the IAM policy for a specified bucket or object. // The `resource` field in the request should be - // projects/_/buckets/ for a bucket or - // projects/_/buckets//objects/ for an object. + // `projects/_/buckets/{bucket}` for a bucket or + // `projects/_/buckets/{bucket}/objects/{object}` for an object. GetIamPolicy(ctx context.Context, in *iampb.GetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error) // Updates an IAM policy for the specified bucket or object. // The `resource` field in the request should be - // projects/_/buckets/ for a bucket or - // projects/_/buckets//objects/ for an object. + // `projects/_/buckets/{bucket}` for a bucket or + // `projects/_/buckets/{bucket}/objects/{object}` for an object. SetIamPolicy(ctx context.Context, in *iampb.SetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error) // Tests a set of permissions on the given bucket or object to see which, if // any, are held by the caller. // The `resource` field in the request should be - // projects/_/buckets/ for a bucket or - // projects/_/buckets//objects/ for an object. + // `projects/_/buckets/{bucket}` for a bucket or + // `projects/_/buckets/{bucket}/objects/{object}` for an object. TestIamPermissions(ctx context.Context, in *iampb.TestIamPermissionsRequest, opts ...grpc.CallOption) (*iampb.TestIamPermissionsResponse, error) // Updates a bucket. Equivalent to JSON API's storage.buckets.patch method. UpdateBucket(ctx context.Context, in *UpdateBucketRequest, opts ...grpc.CallOption) (*Bucket, error) @@ -9400,10 +10131,23 @@ type StorageClient interface { // Concatenates a list of existing objects into a new object in the same // bucket. ComposeObject(ctx context.Context, in *ComposeObjectRequest, opts ...grpc.CallOption) (*Object, error) - // Deletes an object and its metadata. Deletions are permanent if versioning - // is not enabled for the bucket, or if the `generation` parameter is used. + // Deletes an object and its metadata. + // + // Deletions are normally permanent when versioning is disabled or whenever + // the generation parameter is used. However, if soft delete is enabled for + // the bucket, deleted objects can be restored using RestoreObject until the + // soft delete retention period has passed. 
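Editor's note: the regenerated client surface above pairs the reworded DeleteObject comment with the new RestoreObject RPC. The sketch below is only a hedged illustration of the delete-then-restore flow from a caller's point of view; storagepb stands in for this generated package, and the request field names (Bucket, Object, Generation) are assumed from the google.storage.v2 proto rather than spelled out in this hunk. Soft delete must be enabled on the bucket for the restore to succeed.

// Illustrative only; not part of the generated code in this patch.
func deleteThenRestore(ctx context.Context, c storagepb.StorageClient, bucket, object string, gen int64) (*storagepb.Object, error) {
	// Delete the live object. With soft delete enabled this starts the
	// retention window rather than removing the data immediately.
	if _, err := c.DeleteObject(ctx, &storagepb.DeleteObjectRequest{
		Bucket: bucket, // assumed field name, e.g. "projects/_/buckets/my-bucket"
		Object: object, // assumed field name
	}); err != nil {
		return nil, err
	}
	// While the soft delete retention period lasts, the deleted generation
	// can be brought back with the new RestoreObject RPC.
	return c.RestoreObject(ctx, &storagepb.RestoreObjectRequest{
		Bucket:     bucket,
		Object:     object,
		Generation: gen, // assumed field name: the soft-deleted generation to restore
	})
}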
DeleteObject(ctx context.Context, in *DeleteObjectRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Restores a soft-deleted object. + RestoreObject(ctx context.Context, in *RestoreObjectRequest, opts ...grpc.CallOption) (*Object, error) // Cancels an in-progress resumable upload. + // + // Any attempts to write to the resumable upload after cancelling the upload + // will fail. + // + // The behavior for currently in progress write operations is not guaranteed - + // they could either complete before the cancellation or fail if the + // cancellation completes first. CancelResumableWrite(ctx context.Context, in *CancelResumableWriteRequest, opts ...grpc.CallOption) (*CancelResumableWriteResponse, error) // Retrieves an object's metadata. GetObject(ctx context.Context, in *GetObjectRequest, opts ...grpc.CallOption) (*Object, error) @@ -9454,6 +10198,8 @@ type StorageClient interface { // persisted offset. Even though the data isn't written, it may still // incur a performance cost over resuming at the correct write offset. // This behavior can make client-side handling simpler in some cases. + // - Clients must only send data that is a multiple of 256 KiB per message, + // unless the object is being finished with `finish_write` set to `true`. // // The service will not view the object as complete until the client has // sent a `WriteObjectRequest` with `finish_write` set to `true`. Sending any @@ -9465,7 +10211,27 @@ type StorageClient interface { // Attempting to resume an already finalized object will result in an OK // status, with a WriteObjectResponse containing the finalized object's // metadata. + // + // Alternatively, the BidiWriteObject operation may be used to write an + // object with controls over flushing and the ability to fetch the ability to + // determine the current persisted size. WriteObject(ctx context.Context, opts ...grpc.CallOption) (Storage_WriteObjectClient, error) + // Stores a new object and metadata. + // + // This is similar to the WriteObject call with the added support for + // manual flushing of persisted state, and the ability to determine current + // persisted size without closing the stream. + // + // The client may specify one or both of the `state_lookup` and `flush` fields + // in each BidiWriteObjectRequest. If `flush` is specified, the data written + // so far will be persisted to storage. If `state_lookup` is specified, the + // service will respond with a BidiWriteObjectResponse that contains the + // persisted size. If both `flush` and `state_lookup` are specified, the flush + // will always occur before a `state_lookup`, so that both may be set in the + // same request and the returned state will be the state of the object + // post-flush. When the stream is closed, a BidiWriteObjectResponse will + // always be sent to the client, regardless of the value of `state_lookup`. + BidiWriteObject(ctx context.Context, opts ...grpc.CallOption) (Storage_BidiWriteObjectClient, error) // Retrieves a list of objects matching the criteria. ListObjects(ctx context.Context, in *ListObjectsRequest, opts ...grpc.CallOption) (*ListObjectsResponse, error) // Rewrites a source object to a destination object. 
Optionally overrides @@ -9646,6 +10412,15 @@ func (c *storageClient) DeleteObject(ctx context.Context, in *DeleteObjectReques return out, nil } +func (c *storageClient) RestoreObject(ctx context.Context, in *RestoreObjectRequest, opts ...grpc.CallOption) (*Object, error) { + out := new(Object) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/RestoreObject", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *storageClient) CancelResumableWrite(ctx context.Context, in *CancelResumableWriteRequest, opts ...grpc.CallOption) (*CancelResumableWriteResponse, error) { out := new(CancelResumableWriteResponse) err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/CancelResumableWrite", in, out, opts...) @@ -9739,6 +10514,37 @@ func (x *storageWriteObjectClient) CloseAndRecv() (*WriteObjectResponse, error) return m, nil } +func (c *storageClient) BidiWriteObject(ctx context.Context, opts ...grpc.CallOption) (Storage_BidiWriteObjectClient, error) { + stream, err := c.cc.NewStream(ctx, &_Storage_serviceDesc.Streams[2], "/google.storage.v2.Storage/BidiWriteObject", opts...) + if err != nil { + return nil, err + } + x := &storageBidiWriteObjectClient{stream} + return x, nil +} + +type Storage_BidiWriteObjectClient interface { + Send(*BidiWriteObjectRequest) error + Recv() (*BidiWriteObjectResponse, error) + grpc.ClientStream +} + +type storageBidiWriteObjectClient struct { + grpc.ClientStream +} + +func (x *storageBidiWriteObjectClient) Send(m *BidiWriteObjectRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *storageBidiWriteObjectClient) Recv() (*BidiWriteObjectResponse, error) { + m := new(BidiWriteObjectResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + func (c *storageClient) ListObjects(ctx context.Context, in *ListObjectsRequest, opts ...grpc.CallOption) (*ListObjectsResponse, error) { out := new(ListObjectsResponse) err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/ListObjects", in, out, opts...) @@ -9843,19 +10649,19 @@ type StorageServer interface { LockBucketRetentionPolicy(context.Context, *LockBucketRetentionPolicyRequest) (*Bucket, error) // Gets the IAM policy for a specified bucket or object. // The `resource` field in the request should be - // projects/_/buckets/ for a bucket or - // projects/_/buckets//objects/ for an object. + // `projects/_/buckets/{bucket}` for a bucket or + // `projects/_/buckets/{bucket}/objects/{object}` for an object. GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest) (*iampb.Policy, error) // Updates an IAM policy for the specified bucket or object. // The `resource` field in the request should be - // projects/_/buckets/ for a bucket or - // projects/_/buckets//objects/ for an object. + // `projects/_/buckets/{bucket}` for a bucket or + // `projects/_/buckets/{bucket}/objects/{object}` for an object. SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest) (*iampb.Policy, error) // Tests a set of permissions on the given bucket or object to see which, if // any, are held by the caller. // The `resource` field in the request should be - // projects/_/buckets/ for a bucket or - // projects/_/buckets//objects/ for an object. + // `projects/_/buckets/{bucket}` for a bucket or + // `projects/_/buckets/{bucket}/objects/{object}` for an object. TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error) // Updates a bucket. Equivalent to JSON API's storage.buckets.patch method. 
UpdateBucket(context.Context, *UpdateBucketRequest) (*Bucket, error) @@ -9873,10 +10679,23 @@ type StorageServer interface { // Concatenates a list of existing objects into a new object in the same // bucket. ComposeObject(context.Context, *ComposeObjectRequest) (*Object, error) - // Deletes an object and its metadata. Deletions are permanent if versioning - // is not enabled for the bucket, or if the `generation` parameter is used. + // Deletes an object and its metadata. + // + // Deletions are normally permanent when versioning is disabled or whenever + // the generation parameter is used. However, if soft delete is enabled for + // the bucket, deleted objects can be restored using RestoreObject until the + // soft delete retention period has passed. DeleteObject(context.Context, *DeleteObjectRequest) (*emptypb.Empty, error) + // Restores a soft-deleted object. + RestoreObject(context.Context, *RestoreObjectRequest) (*Object, error) // Cancels an in-progress resumable upload. + // + // Any attempts to write to the resumable upload after cancelling the upload + // will fail. + // + // The behavior for currently in progress write operations is not guaranteed - + // they could either complete before the cancellation or fail if the + // cancellation completes first. CancelResumableWrite(context.Context, *CancelResumableWriteRequest) (*CancelResumableWriteResponse, error) // Retrieves an object's metadata. GetObject(context.Context, *GetObjectRequest) (*Object, error) @@ -9927,6 +10746,8 @@ type StorageServer interface { // persisted offset. Even though the data isn't written, it may still // incur a performance cost over resuming at the correct write offset. // This behavior can make client-side handling simpler in some cases. + // - Clients must only send data that is a multiple of 256 KiB per message, + // unless the object is being finished with `finish_write` set to `true`. // // The service will not view the object as complete until the client has // sent a `WriteObjectRequest` with `finish_write` set to `true`. Sending any @@ -9938,7 +10759,27 @@ type StorageServer interface { // Attempting to resume an already finalized object will result in an OK // status, with a WriteObjectResponse containing the finalized object's // metadata. + // + // Alternatively, the BidiWriteObject operation may be used to write an + // object with controls over flushing and the ability to fetch the ability to + // determine the current persisted size. WriteObject(Storage_WriteObjectServer) error + // Stores a new object and metadata. + // + // This is similar to the WriteObject call with the added support for + // manual flushing of persisted state, and the ability to determine current + // persisted size without closing the stream. + // + // The client may specify one or both of the `state_lookup` and `flush` fields + // in each BidiWriteObjectRequest. If `flush` is specified, the data written + // so far will be persisted to storage. If `state_lookup` is specified, the + // service will respond with a BidiWriteObjectResponse that contains the + // persisted size. If both `flush` and `state_lookup` are specified, the flush + // will always occur before a `state_lookup`, so that both may be set in the + // same request and the returned state will be the state of the object + // post-flush. When the stream is closed, a BidiWriteObjectResponse will + // always be sent to the client, regardless of the value of `state_lookup`. 
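Editor's note: the BidiWriteObject comment above spells out the flush/state_lookup contract, and the sketch below shows one way a caller might drive it once the stream types added later in this patch are generated. It is an assumption-laden illustration: storagepb is an alias for this generated package, and the oneof and field names (FirstMessage, Data, WriteOffset, Flush, StateLookup, FinishWrite, ChecksummedData.Content) are inferred from the proto and from the OneofWrappers entries earlier in this diff, not guaranteed by this hunk. As with WriteObject, non-final messages would normally carry data in multiples of 256 KiB.

// Hedged sketch of the flush + state_lookup flow; not part of this patch.
func bidiUpload(ctx context.Context, c storagepb.StorageClient, spec *storagepb.WriteObjectSpec, chunks [][]byte) (*storagepb.Object, error) {
	stream, err := c.BidiWriteObject(ctx)
	if err != nil {
		return nil, err
	}
	var offset int64
	var last *storagepb.BidiWriteObjectResponse
	for i, chunk := range chunks {
		req := &storagepb.BidiWriteObjectRequest{
			WriteOffset: offset, // assumed field name
			Data: &storagepb.BidiWriteObjectRequest_ChecksummedData{ // wrapper name from the OneofWrappers list above
				ChecksummedData: &storagepb.ChecksummedData{Content: chunk},
			},
			Flush:       true,               // persist what has been written so far
			StateLookup: true,               // ask for the current persisted size in the response
			FinishWrite: i == len(chunks)-1, // finalize the object on the last message
		}
		if i == 0 {
			// The first message carries the WriteObjectSpec (or an upload_id when resuming).
			req.FirstMessage = &storagepb.BidiWriteObjectRequest_WriteObjectSpec{WriteObjectSpec: spec}
		}
		if err := stream.Send(req); err != nil {
			return nil, err
		}
		offset += int64(len(chunk))
		// Because state_lookup was set, the service responds after the flush:
		// with the persisted size, or with the finalized object once
		// finish_write has been processed.
		if last, err = stream.Recv(); err != nil {
			return nil, err
		}
	}
	// GetResource is nil-safe on generated messages; it is non-nil only on
	// the response to the finalizing message.
	return last.GetResource(), stream.CloseSend()
}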
+ BidiWriteObject(Storage_BidiWriteObjectServer) error // Retrieves a list of objects matching the criteria. ListObjects(context.Context, *ListObjectsRequest) (*ListObjectsResponse, error) // Rewrites a source object to a destination object. Optionally overrides @@ -10025,6 +10866,9 @@ func (*UnimplementedStorageServer) ComposeObject(context.Context, *ComposeObject func (*UnimplementedStorageServer) DeleteObject(context.Context, *DeleteObjectRequest) (*emptypb.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method DeleteObject not implemented") } +func (*UnimplementedStorageServer) RestoreObject(context.Context, *RestoreObjectRequest) (*Object, error) { + return nil, status.Errorf(codes.Unimplemented, "method RestoreObject not implemented") +} func (*UnimplementedStorageServer) CancelResumableWrite(context.Context, *CancelResumableWriteRequest) (*CancelResumableWriteResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method CancelResumableWrite not implemented") } @@ -10040,6 +10884,9 @@ func (*UnimplementedStorageServer) UpdateObject(context.Context, *UpdateObjectRe func (*UnimplementedStorageServer) WriteObject(Storage_WriteObjectServer) error { return status.Errorf(codes.Unimplemented, "method WriteObject not implemented") } +func (*UnimplementedStorageServer) BidiWriteObject(Storage_BidiWriteObjectServer) error { + return status.Errorf(codes.Unimplemented, "method BidiWriteObject not implemented") +} func (*UnimplementedStorageServer) ListObjects(context.Context, *ListObjectsRequest) (*ListObjectsResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ListObjects not implemented") } @@ -10345,6 +11192,24 @@ func _Storage_DeleteObject_Handler(srv interface{}, ctx context.Context, dec fun return interceptor(ctx, in, info, handler) } +func _Storage_RestoreObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RestoreObjectRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).RestoreObject(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/RestoreObject", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).RestoreObject(ctx, req.(*RestoreObjectRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _Storage_CancelResumableWrite_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(CancelResumableWriteRequest) if err := dec(in); err != nil { @@ -10446,6 +11311,32 @@ func (x *storageWriteObjectServer) Recv() (*WriteObjectRequest, error) { return m, nil } +func _Storage_BidiWriteObject_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(StorageServer).BidiWriteObject(&storageBidiWriteObjectServer{stream}) +} + +type Storage_BidiWriteObjectServer interface { + Send(*BidiWriteObjectResponse) error + Recv() (*BidiWriteObjectRequest, error) + grpc.ServerStream +} + +type storageBidiWriteObjectServer struct { + grpc.ServerStream +} + +func (x *storageBidiWriteObjectServer) Send(m *BidiWriteObjectResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *storageBidiWriteObjectServer) Recv() (*BidiWriteObjectRequest, error) { + m := new(BidiWriteObjectRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + 
return m, nil +} + func _Storage_ListObjects_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ListObjectsRequest) if err := dec(in); err != nil { @@ -10690,6 +11581,10 @@ var _Storage_serviceDesc = grpc.ServiceDesc{ MethodName: "DeleteObject", Handler: _Storage_DeleteObject_Handler, }, + { + MethodName: "RestoreObject", + Handler: _Storage_RestoreObject_Handler, + }, { MethodName: "CancelResumableWrite", Handler: _Storage_CancelResumableWrite_Handler, @@ -10754,6 +11649,12 @@ var _Storage_serviceDesc = grpc.ServiceDesc{ Handler: _Storage_WriteObject_Handler, ClientStreams: true, }, + { + StreamName: "BidiWriteObject", + Handler: _Storage_BidiWriteObject_Handler, + ServerStreams: true, + ClientStreams: true, + }, }, Metadata: "google/storage/v2/storage.proto", } diff --git a/vendor/cloud.google.com/go/storage/internal/version.go b/vendor/cloud.google.com/go/storage/internal/version.go index 783750d46da99..eca9b294aa1a6 100644 --- a/vendor/cloud.google.com/go/storage/internal/version.go +++ b/vendor/cloud.google.com/go/storage/internal/version.go @@ -15,4 +15,4 @@ package internal // Version is the current tagged release of the library. -const Version = "1.30.1" +const Version = "1.35.1" diff --git a/vendor/cloud.google.com/go/storage/invoke.go b/vendor/cloud.google.com/go/storage/invoke.go index 810d64285d0ce..dc79fd88bbc49 100644 --- a/vendor/cloud.google.com/go/storage/invoke.go +++ b/vendor/cloud.google.com/go/storage/invoke.go @@ -20,7 +20,6 @@ import ( "fmt" "io" "net" - "net/http" "net/url" "strings" @@ -29,6 +28,7 @@ import ( sinternal "cloud.google.com/go/storage/internal" "github.com/google/uuid" gax "github.com/googleapis/gax-go/v2" + "github.com/googleapis/gax-go/v2/callctx" "google.golang.org/api/googleapi" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -37,10 +37,15 @@ import ( var defaultRetry *retryConfig = &retryConfig{} var xGoogDefaultHeader = fmt.Sprintf("gl-go/%s gccl/%s", version.Go(), sinternal.Version) +const ( + xGoogHeaderKey = "x-goog-api-client" + idempotencyHeaderKey = "x-goog-gcs-idempotency-token" +) + // run determines whether a retry is necessary based on the config and // idempotency information. It then calls the function with or without retries // as appropriate, using the configured settings. 
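// The refactored run below swaps per-request header mutation for gax's callctx
// package: per-attempt invocation headers are attached to the context, and the
// HTTP and gRPC transports later emit them on the outgoing request. A tiny
// standalone illustration of that mechanism (the header values are hand-written
// samples, not output captured from this code):
package main

import (
	"context"
	"fmt"

	"github.com/googleapis/gax-go/v2/callctx"
)

func main() {
	ctx := context.Background()
	// Attach the same pair of headers that the retry wrapper builds for each
	// attempt: the x-goog-api-client metrics header and the idempotency token.
	ctx = callctx.SetHeaders(ctx, "x-goog-api-client", "gccl-invocation-id/1234 gccl-attempt-count/1")
	ctx = callctx.SetHeaders(ctx, "x-goog-gcs-idempotency-token", "1234")

	// Transports read the accumulated headers back when building the request.
	fmt.Println(callctx.HeadersFromContext(ctx))
}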
-func run(ctx context.Context, call func() error, retry *retryConfig, isIdempotent bool, setHeader func(string, int)) error { +func run(ctx context.Context, call func(ctx context.Context) error, retry *retryConfig, isIdempotent bool) error { attempts := 1 invocationID := uuid.New().String() @@ -48,8 +53,8 @@ func run(ctx context.Context, call func() error, retry *retryConfig, isIdempoten retry = defaultRetry } if (retry.policy == RetryIdempotent && !isIdempotent) || retry.policy == RetryNever { - setHeader(invocationID, attempts) - return call() + ctxWithHeaders := setInvocationHeaders(ctx, invocationID, attempts) + return call(ctxWithHeaders) } bo := gax.Backoff{} if retry.backoff != nil { @@ -63,30 +68,22 @@ func run(ctx context.Context, call func() error, retry *retryConfig, isIdempoten } return internal.Retry(ctx, bo, func() (stop bool, err error) { - setHeader(invocationID, attempts) - err = call() + ctxWithHeaders := setInvocationHeaders(ctx, invocationID, attempts) + err = call(ctxWithHeaders) attempts++ return !errorFunc(err), err }) } -func setRetryHeaderHTTP(req interface{ Header() http.Header }) func(string, int) { - return func(invocationID string, attempts int) { - if req == nil { - return - } - header := req.Header() - invocationHeader := fmt.Sprintf("gccl-invocation-id/%v gccl-attempt-count/%v", invocationID, attempts) - xGoogHeader := strings.Join([]string{invocationHeader, xGoogDefaultHeader}, " ") - header.Set("x-goog-api-client", xGoogHeader) - } -} +// Sets invocation ID headers on the context which will be propagated as +// headers in the call to the service (for both gRPC and HTTP). +func setInvocationHeaders(ctx context.Context, invocationID string, attempts int) context.Context { + invocationHeader := fmt.Sprintf("gccl-invocation-id/%v gccl-attempt-count/%v", invocationID, attempts) + xGoogHeader := strings.Join([]string{invocationHeader, xGoogDefaultHeader}, " ") -// TODO: Implement method setting header via context for gRPC -func setRetryHeaderGRPC(_ context.Context) func(string, int) { - return func(_ string, _ int) { - return - } + ctx = callctx.SetHeaders(ctx, xGoogHeaderKey, xGoogHeader) + ctx = callctx.SetHeaders(ctx, idempotencyHeaderKey, invocationID) + return ctx } // ShouldRetry returns true if an error is retryable, based on best practice @@ -131,12 +128,11 @@ func ShouldRetry(err error) bool { return true } } - // HTTP 429, 502, 503, and 504 all map to gRPC UNAVAILABLE per - // https://grpc.github.io/grpc/core/md_doc_http-grpc-status-mapping.html. - // - // This is only necessary for the experimental gRPC-based media operations. - if st, ok := status.FromError(err); ok && st.Code() == codes.Unavailable { - return true + // UNAVAILABLE, RESOURCE_EXHAUSTED, and INTERNAL codes are all retryable for gRPC. 
+ if st, ok := status.FromError(err); ok { + if code := st.Code(); code == codes.Unavailable || code == codes.ResourceExhausted || code == codes.Internal { + return true + } } // Unwrap is only supported in go1.13.x+ if e, ok := err.(interface{ Unwrap() error }); ok { diff --git a/vendor/cloud.google.com/go/storage/notifications.go b/vendor/cloud.google.com/go/storage/notifications.go index d5619ac5adb93..56f3e3daa5cc7 100644 --- a/vendor/cloud.google.com/go/storage/notifications.go +++ b/vendor/cloud.google.com/go/storage/notifications.go @@ -21,7 +21,7 @@ import ( "regexp" "cloud.google.com/go/internal/trace" - storagepb "cloud.google.com/go/storage/internal/apiv2/stubs" + "cloud.google.com/go/storage/internal/apiv2/storagepb" raw "google.golang.org/api/storage/v1" ) diff --git a/vendor/cloud.google.com/go/storage/option.go b/vendor/cloud.google.com/go/storage/option.go index f59ab818f644e..e72ceb78f061d 100644 --- a/vendor/cloud.google.com/go/storage/option.go +++ b/vendor/cloud.google.com/go/storage/option.go @@ -57,7 +57,7 @@ func WithJSONReads() option.ClientOption { } // WithXMLReads is an option that may be passed to a Storage Client on creation. -// It sets the client to use the JSON API for object reads. +// It sets the client to use the XML API for object reads. // // This is the current default. func WithXMLReads() option.ClientOption { diff --git a/vendor/cloud.google.com/go/storage/post_policy_v4.go b/vendor/cloud.google.com/go/storage/post_policy_v4.go index 2961aca206281..6bc73fb7af47e 100644 --- a/vendor/cloud.google.com/go/storage/post_policy_v4.go +++ b/vendor/cloud.google.com/go/storage/post_policy_v4.go @@ -32,7 +32,7 @@ import ( // Please see https://cloud.google.com/storage/docs/xml-api/post-object // for reference about the fields. type PostPolicyV4Options struct { - // GoogleAccessID represents the authorizer of the signed URL generation. + // GoogleAccessID represents the authorizer of the signed post policy generation. // It is typically the Google service account client email address from // the Google Developers Console in the form of "xxx@developer.gserviceaccount.com". // Required. @@ -85,7 +85,7 @@ type PostPolicyV4Options struct { // Exactly one of PrivateKey or SignRawBytes must be non-nil. SignRawBytes func(bytes []byte) (signature []byte, err error) - // Expires is the expiration time on the signed URL. + // Expires is the expiration time on the signed post policy. // It must be a time in the future. // Required. Expires time.Time @@ -113,6 +113,12 @@ type PostPolicyV4Options struct { // Optional. Conditions []PostPolicyV4Condition + // Hostname sets the host of the signed post policy. This field overrides + // any endpoint set on a storage Client or through STORAGE_EMULATOR_HOST. + // Only compatible with PathStyle URLStyle. + // Optional. 
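// A hedged sketch of the new Hostname override on PostPolicyV4Options: the
// generated policy URL targets the supplied host instead of
// storage.googleapis.com or STORAGE_EMULATOR_HOST, and only works with the
// PathStyle URL style. Bucket, object, host and signer values are placeholders,
// and the stub SignRawBytes stands in for real key material (for example a
// KMS-backed signer).
package main

import (
	"fmt"
	"log"
	"time"

	"cloud.google.com/go/storage"
)

func main() {
	policy, err := storage.GenerateSignedPostPolicyV4("my-bucket", "uploads/report.csv", &storage.PostPolicyV4Options{
		GoogleAccessID: "signer@my-project.iam.gserviceaccount.com",
		// SignRawBytes must produce a real signature in practice; this stub only
		// keeps the example self-contained.
		SignRawBytes: func(b []byte) ([]byte, error) { return []byte("placeholder-signature"), nil },
		Expires:      time.Now().Add(15 * time.Minute),
		Style:        storage.PathStyle(),
		// Route the resulting form action at a private endpoint.
		Hostname: "storage.internal.example.com",
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(policy.URL)
}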
+ Hostname string + shouldHashSignBytes bool } @@ -128,6 +134,7 @@ func (opts *PostPolicyV4Options) clone() *PostPolicyV4Options { Fields: opts.Fields, Conditions: opts.Conditions, shouldHashSignBytes: opts.shouldHashSignBytes, + Hostname: opts.Hostname, } } @@ -370,7 +377,7 @@ func GenerateSignedPostPolicyV4(bucket, object string, opts *PostPolicyV4Options u := &url.URL{ Path: path, RawPath: pathEncodeV4(path), - Host: opts.Style.host(bucket), + Host: opts.Style.host(opts.Hostname, bucket), Scheme: scheme, } diff --git a/vendor/cloud.google.com/go/storage/reader.go b/vendor/cloud.google.com/go/storage/reader.go index 1bb65ec8107b2..4673a68d07899 100644 --- a/vendor/cloud.google.com/go/storage/reader.go +++ b/vendor/cloud.google.com/go/storage/reader.go @@ -87,8 +87,9 @@ func (o *ObjectHandle) NewReader(ctx context.Context) (*Reader, error) { // that file will be served back whole, regardless of the requested range as // Google Cloud Storage dictates. func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) (r *Reader, err error) { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.NewRangeReader") - defer func() { trace.EndSpan(ctx, err) }() + // This span covers the life of the reader. It is closed via the context + // in Reader.Close. + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.Reader") if err := o.validate(); err != nil { return nil, err @@ -117,6 +118,14 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) r, err = o.c.tc.NewRangeReader(ctx, params, opts...) + // Pass the context so that the span can be closed in Reader.Close, or close the + // span now if there is an error. + if err == nil { + r.ctx = ctx + } else { + trace.EndSpan(ctx, err) + } + return r, err } @@ -178,16 +187,6 @@ func setConditionsHeaders(headers http.Header, conds *Conditions) error { return nil } -// Wrap a request to look similar to an apiary library request, in order to -// be used by run(). -type readerRequestWrapper struct { - req *http.Request -} - -func (w *readerRequestWrapper) Header() http.Header { - return w.req.Header -} - var emptyBody = ioutil.NopCloser(strings.NewReader("")) // Reader reads a Cloud Storage object. @@ -204,11 +203,14 @@ type Reader struct { gotCRC uint32 // running crc reader io.ReadCloser + ctx context.Context } // Close closes the Reader. It must be called when done reading. func (r *Reader) Close() error { - return r.reader.Close() + err := r.reader.Close() + trace.EndSpan(r.ctx, err) + return err } func (r *Reader) Read(p []byte) (int, error) { diff --git a/vendor/cloud.google.com/go/storage/storage.go b/vendor/cloud.google.com/go/storage/storage.go index 2a5723a22ff3a..a16e512f5e7d3 100644 --- a/vendor/cloud.google.com/go/storage/storage.go +++ b/vendor/cloud.google.com/go/storage/storage.go @@ -41,7 +41,7 @@ import ( "cloud.google.com/go/internal/optional" "cloud.google.com/go/internal/trace" "cloud.google.com/go/storage/internal" - storagepb "cloud.google.com/go/storage/internal/apiv2/stubs" + "cloud.google.com/go/storage/internal/apiv2/storagepb" "github.com/googleapis/gax-go/v2" "golang.org/x/oauth2/google" "google.golang.org/api/googleapi" @@ -109,8 +109,8 @@ type Client struct { raw *raw.Service // Scheme describes the scheme under the current host. scheme string - // ReadHost is the default host used on the reader. - readHost string + // xmlHost is the default host used for XML requests. + xmlHost string // May be nil. 
creds *google.Credentials retry *retryConfig @@ -123,7 +123,7 @@ type Client struct { useGRPC bool } -// NewClient creates a new Google Cloud Storage client. +// NewClient creates a new Google Cloud Storage client using the HTTP transport. // The default scope is ScopeFullControl. To use a different scope, like // ScopeReadOnly, use option.WithScopes. // @@ -133,12 +133,6 @@ type Client struct { // You may configure the client by passing in options from the [google.golang.org/api/option] // package. You may also use options defined in this package, such as [WithJSONReads]. func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { - // Use the experimental gRPC client if the env var is set. - // This is an experimental API and not intended for public use. - if withGRPC := os.Getenv("STORAGE_USE_GRPC"); withGRPC != "" { - return newGRPCClient(ctx, opts...) - } - var creds *google.Credentials // In general, it is recommended to use raw.NewService instead of htransport.NewClient @@ -199,7 +193,7 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error if err != nil { return nil, fmt.Errorf("storage client: %w", err) } - // Update readHost and scheme with the chosen endpoint. + // Update xmlHost and scheme with the chosen endpoint. u, err := url.Parse(ep) if err != nil { return nil, fmt.Errorf("supplied endpoint %q is not valid: %w", ep, err) @@ -211,20 +205,29 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error } return &Client{ - hc: hc, - raw: rawService, - scheme: u.Scheme, - readHost: u.Host, - creds: creds, - tc: tc, + hc: hc, + raw: rawService, + scheme: u.Scheme, + xmlHost: u.Host, + creds: creds, + tc: tc, }, nil } -// newGRPCClient creates a new Storage client that initializes a gRPC-based -// client. Calls that have not been implemented in gRPC will panic. +// NewGRPCClient creates a new Storage client using the gRPC transport and API. +// Client methods which have not been implemented in gRPC will return an error. +// In particular, methods for Cloud Pub/Sub notifications are not supported. // -// This is an experimental API and not intended for public use. -func newGRPCClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { +// The storage gRPC API is still in preview and not yet publicly available. +// If you would like to use the API, please first contact your GCP account rep to +// request access. The API may be subject to breaking changes. +// +// Clients should be reused instead of created as needed. The methods of Client +// are safe for concurrent use by multiple goroutines. +// +// You may configure the client by passing in options from the [google.golang.org/api/option] +// package. +func NewGRPCClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { opts = append(defaultGRPCOptions(), opts...) tc, err := newGRPCStorageClient(ctx, withClientOptions(opts...)) if err != nil { @@ -262,13 +265,13 @@ const ( SigningSchemeV4 ) -// URLStyle determines the style to use for the signed URL. pathStyle is the +// URLStyle determines the style to use for the signed URL. PathStyle is the // default. All non-default options work with V4 scheme only. See // https://cloud.google.com/storage/docs/request-endpoints for details. type URLStyle interface { // host should return the host portion of the signed URL, not including // the scheme (e.g. storage.googleapis.com). 
- host(bucket string) string + host(hostname, bucket string) string // path should return the path portion of the signed URL, which may include // both the bucket and object name or only the object name depending on the @@ -284,7 +287,11 @@ type bucketBoundHostname struct { hostname string } -func (s pathStyle) host(bucket string) string { +func (s pathStyle) host(hostname, bucket string) string { + if hostname != "" { + return stripScheme(hostname) + } + if host := os.Getenv("STORAGE_EMULATOR_HOST"); host != "" { return stripScheme(host) } @@ -292,7 +299,7 @@ func (s pathStyle) host(bucket string) string { return "storage.googleapis.com" } -func (s virtualHostedStyle) host(bucket string) string { +func (s virtualHostedStyle) host(_, bucket string) string { if host := os.Getenv("STORAGE_EMULATOR_HOST"); host != "" { return bucket + "." + stripScheme(host) } @@ -300,7 +307,7 @@ func (s virtualHostedStyle) host(bucket string) string { return bucket + ".storage.googleapis.com" } -func (s bucketBoundHostname) host(bucket string) string { +func (s bucketBoundHostname) host(_, bucket string) string { return s.hostname } @@ -321,7 +328,10 @@ func (s bucketBoundHostname) path(bucket, object string) string { } // PathStyle is the default style, and will generate a URL of the form -// "storage.googleapis.com//". +// "//". By default, is +// storage.googleapis.com, but setting an endpoint on the storage Client or +// through STORAGE_EMULATOR_HOST overrides this. Setting Hostname on +// SignedURLOptions or PostPolicyV4Options overrides everything else. func PathStyle() URLStyle { return pathStyle{} } @@ -442,6 +452,12 @@ type SignedURLOptions struct { // Scheme determines the version of URL signing to use. Default is // SigningSchemeV2. Scheme SigningScheme + + // Hostname sets the host of the signed URL. This field overrides any + // endpoint set on a storage Client or through STORAGE_EMULATOR_HOST. + // Only compatible with PathStyle URLStyle. + // Optional. + Hostname string } func (opts *SignedURLOptions) clone() *SignedURLOptions { @@ -458,6 +474,7 @@ func (opts *SignedURLOptions) clone() *SignedURLOptions { Style: opts.Style, Insecure: opts.Insecure, Scheme: opts.Scheme, + Hostname: opts.Hostname, } } @@ -716,7 +733,7 @@ func signedURLV4(bucket, name string, opts *SignedURLOptions, now time.Time) (st fmt.Fprintf(buf, "%s\n", escapedQuery) // Fill in the hostname based on the desired URL style. - u.Host = opts.Style.host(bucket) + u.Host = opts.Style.host(opts.Hostname, bucket) // Fill in the URL scheme. if opts.Insecure { @@ -850,7 +867,7 @@ func signedURLV2(bucket, name string, opts *SignedURLOptions) (string, error) { } encoded := base64.StdEncoding.EncodeToString(b) u.Scheme = "https" - u.Host = PathStyle().host(bucket) + u.Host = PathStyle().host(opts.Hostname, bucket) q := u.Query() q.Set("GoogleAccessId", opts.GoogleAccessID) q.Set("Expires", fmt.Sprintf("%d", opts.Expires.Unix())) @@ -893,7 +910,9 @@ func (o *ObjectHandle) Generation(gen int64) *ObjectHandle { } // If returns a new ObjectHandle that applies a set of preconditions. -// Preconditions already set on the ObjectHandle are ignored. +// Preconditions already set on the ObjectHandle are ignored. The supplied +// Conditions must have at least one field set to a non-default value; +// otherwise an error will be returned from any operation on the ObjectHandle. // Operations on the new handle will return an error if the preconditions are not // satisfied. 
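// A short sketch of the precondition rule spelled out above: the Conditions
// passed to If must have at least one non-default field, such as DoesNotExist
// or GenerationMatch. Bucket and object names are placeholders.
package main

import (
	"context"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	obj := client.Bucket("my-bucket").Object("notes.txt")

	// Create the object only if it does not already exist; a zero-value
	// storage.Conditions{} here would make every operation on the handle fail.
	w := obj.If(storage.Conditions{DoesNotExist: true}).NewWriter(ctx)
	if _, err := w.Write([]byte("hello")); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil {
		log.Fatal(err) // a 412 Precondition Failed means the object already exists
	}
}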
See https://cloud.google.com/storage/docs/generations-preconditions // for more details. @@ -1020,6 +1039,7 @@ func (o *ObjectHandle) ReadCompressed(compressed bool) *ObjectHandle { // It is the caller's responsibility to call Close when writing is done. To // stop writing without saving the data, cancel the context. func (o *ObjectHandle) NewWriter(ctx context.Context) *Writer { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.Writer") return &Writer{ ctx: ctx, o: o, @@ -1163,7 +1183,7 @@ func (uattrs *ObjectAttrsToUpdate) toProtoObject(bucket, object string) *storage o.Acl = toProtoObjectACL(uattrs.ACL) } - // TODO(cathyo): Handle metadata. Pending b/230510191. + o.Metadata = uattrs.Metadata return o } @@ -1484,6 +1504,8 @@ type Query struct { // aside from the prefix, contain delimiter will have their name, // truncated after the delimiter, returned in prefixes. // Duplicate prefixes are omitted. + // Must be set to / when used with the MatchGlob parameter to filter results + // in a directory-like mode. // Optional. Delimiter string @@ -1497,9 +1519,9 @@ type Query struct { Versions bool // attrSelection is used to select only specific fields to be returned by - // the query. It is set by the user calling calling SetAttrSelection. These + // the query. It is set by the user calling SetAttrSelection. These // are used by toFieldMask and toFieldSelection for gRPC and HTTP/JSON - // clients repsectively. + // clients respectively. attrSelection []string // StartOffset is used to filter results to objects whose names are @@ -1525,6 +1547,12 @@ type Query struct { // true, they will also be included as objects and their metadata will be // populated in the returned ObjectAttrs. IncludeTrailingDelimiter bool + + // MatchGlob is a glob pattern used to filter results (for example, foo*bar). See + // https://cloud.google.com/storage/docs/json_api/v1/objects/list#list-object-glob + // for syntax details. When Delimiter is set in conjunction with MatchGlob, + // it must be set to /. + MatchGlob string } // attrToFieldMap maps the field names of ObjectAttrs to the underlying field @@ -2162,8 +2190,6 @@ func toProjectResource(project string) string { // setConditionProtoField uses protobuf reflection to set named condition field // to the given condition value if supported on the protobuf message. -// -// This is an experimental API and not intended for public use. func setConditionProtoField(m protoreflect.Message, f string, v int64) bool { fields := m.Descriptor().Fields() if rf := fields.ByName(protoreflect.Name(f)); rf != nil { @@ -2176,8 +2202,6 @@ func setConditionProtoField(m protoreflect.Message, f string, v int64) bool { // applyCondsProto validates and attempts to set the conditions on a protobuf // message using protobuf reflection. -// -// This is an experimental API and not intended for public use. func applyCondsProto(method string, gen int64, conds *Conditions, msg proto.Message) error { rmsg := msg.ProtoReflect() diff --git a/vendor/cloud.google.com/go/storage/writer.go b/vendor/cloud.google.com/go/storage/writer.go index 3a6a1ce0f56c4..aeb7ed418c8d6 100644 --- a/vendor/cloud.google.com/go/storage/writer.go +++ b/vendor/cloud.google.com/go/storage/writer.go @@ -22,6 +22,8 @@ import ( "sync" "time" "unicode/utf8" + + "cloud.google.com/go/internal/trace" ) // A Writer writes a Cloud Storage object. @@ -86,7 +88,7 @@ type Writer struct { // cancellation. ChunkRetryDeadline time.Duration - // ProgressFunc can be used to monitor the progress of a large write. 
+ // ProgressFunc can be used to monitor the progress of a large write // operation. If ProgressFunc is not nil and writing requires multiple // calls to the underlying service (see // https://cloud.google.com/storage/docs/json_api/v1/how-tos/resumable-upload), @@ -163,6 +165,7 @@ func (w *Writer) Close() error { <-w.donec w.mu.Lock() defer w.mu.Unlock() + trace.EndSpan(w.ctx, w.err) return w.err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md index 8206a57c7735c..7a0a524e33212 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md @@ -1,5 +1,139 @@ # Release History +## 1.9.2 (2024-02-06) + +### Bugs Fixed + +* `runtime.MarshalAsByteArray` and `runtime.MarshalAsJSON` will preserve the preexisting value of the `Content-Type` header. + +### Other Changes + +* Update to latest version of `internal`. + +## 1.9.1 (2023-12-11) + +### Bugs Fixed + +* The `retry-after-ms` and `x-ms-retry-after-ms` headers weren't being checked during retries. + +### Other Changes + +* Update dependencies. + +## 1.9.0 (2023-11-06) + +### Breaking Changes +> These changes affect only code written against previous beta versions of `v1.7.0` and `v1.8.0` +* The function `NewTokenCredential` has been removed from the `fake` package. Use a literal `&fake.TokenCredential{}` instead. +* The field `TracingNamespace` in `runtime.PipelineOptions` has been replaced by `TracingOptions`. + +### Bugs Fixed + +* Fixed an issue that could cause some allowed HTTP header values to not show up in logs. +* Include error text instead of error type in traces when the transport returns an error. +* Fixed an issue that could cause an HTTP/2 request to hang when the TCP connection becomes unresponsive. +* Block key and SAS authentication for non TLS protected endpoints. +* Passing a `nil` credential value will no longer cause a panic. Instead, the authentication is skipped. +* Calling `Error` on a zero-value `azcore.ResponseError` will no longer panic. +* Fixed an issue in `fake.PagerResponder[T]` that would cause a trailing error to be omitted when iterating over pages. +* Context values created by `azcore` will no longer flow across disjoint HTTP requests. + +### Other Changes + +* Skip generating trace info for no-op tracers. +* The `clientName` paramater in client constructors has been renamed to `moduleName`. + +## 1.9.0-beta.1 (2023-10-05) + +### Other Changes + +* The beta features for tracing and fakes have been reinstated. + +## 1.8.0 (2023-10-05) + +### Features Added + +* This includes the following features from `v1.8.0-beta.N` releases. + * Claims and CAE for authentication. + * New `messaging` package. + * Various helpers in the `runtime` package. + * Deprecation of `runtime.With*` funcs and their replacements in the `policy` package. +* Added types `KeyCredential` and `SASCredential` to the `azcore` package. + * Includes their respective constructor functions. +* Added types `KeyCredentialPolicy` and `SASCredentialPolicy` to the `azcore/runtime` package. + * Includes their respective constructor functions and options types. + +### Breaking Changes +> These changes affect only code written against beta versions of `v1.8.0` +* The beta features for tracing and fakes have been omitted for this release. + +### Bugs Fixed + +* Fixed an issue that could cause some ARM RPs to not be automatically registered. 
+* Block bearer token authentication for non TLS protected endpoints. + +### Other Changes + +* Updated dependencies. + +## 1.8.0-beta.3 (2023-09-07) + +### Features Added + +* Added function `FetcherForNextLink` and `FetcherForNextLinkOptions` to the `runtime` package to centralize creation of `Pager[T].Fetcher` from a next link URL. + +### Bugs Fixed + +* Suppress creating spans for nested SDK API calls. The HTTP span will be a child of the outer API span. + +### Other Changes + +* The following functions in the `runtime` package are now exposed from the `policy` package, and the `runtime` versions have been deprecated. + * `WithCaptureResponse` + * `WithHTTPHeader` + * `WithRetryOptions` + +## 1.7.2 (2023-09-06) + +### Bugs Fixed + +* Fix default HTTP transport to work in WASM modules. + +## 1.8.0-beta.2 (2023-08-14) + +### Features Added + +* Added function `SanitizePagerPollerPath` to the `server` package to centralize sanitization and formalize the contract. +* Added `TokenRequestOptions.EnableCAE` to indicate whether to request a CAE token. + +### Breaking Changes + +> This change affects only code written against beta version `v1.8.0-beta.1`. +* `messaging.CloudEvent` deserializes JSON objects as `[]byte`, instead of `json.RawMessage`. See the documentation for CloudEvent.Data for more information. + +> This change affects only code written against beta versions `v1.7.0-beta.2` and `v1.8.0-beta.1`. +* Removed parameter from method `Span.End()` and its type `tracing.SpanEndOptions`. This API GA'ed in `v1.2.0` so we cannot change it. + +### Bugs Fixed + +* Propagate any query parameters when constructing a fake poller and/or injecting next links. + +## 1.7.1 (2023-08-14) + +## Bugs Fixed + +* Enable TLS renegotiation in the default transport policy. + +## 1.8.0-beta.1 (2023-07-12) + +### Features Added + +- `messaging/CloudEvent` allows you to serialize/deserialize CloudEvents, as described in the CloudEvents 1.0 specification: [link](https://github.com/cloudevents/spec) + +### Other Changes + +* The beta features for CAE, tracing, and fakes have been reinstated. + ## 1.7.0 (2023-07-12) ### Features Added diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go new file mode 100644 index 0000000000000..187fe82b97c49 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go @@ -0,0 +1,224 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package resource + +import ( + "fmt" + "strings" +) + +const ( + providersKey = "providers" + subscriptionsKey = "subscriptions" + resourceGroupsLowerKey = "resourcegroups" + locationsKey = "locations" + builtInResourceNamespace = "Microsoft.Resources" +) + +// RootResourceID defines the tenant as the root parent of all other ResourceID. +var RootResourceID = &ResourceID{ + Parent: nil, + ResourceType: TenantResourceType, + Name: "", +} + +// ResourceID represents a resource ID such as `/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myRg`. +// Don't create this type directly, use ParseResourceID instead. +type ResourceID struct { + // Parent is the parent ResourceID of this instance. + // Can be nil if there is no parent. + Parent *ResourceID + + // SubscriptionID is the subscription ID in this resource ID. 
+ // The value can be empty if the resource ID does not contain a subscription ID. + SubscriptionID string + + // ResourceGroupName is the resource group name in this resource ID. + // The value can be empty if the resource ID does not contain a resource group name. + ResourceGroupName string + + // Provider represents the provider name in this resource ID. + // This is only valid when the resource ID represents a resource provider. + // Example: `/subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.Insights` + Provider string + + // Location is the location in this resource ID. + // The value can be empty if the resource ID does not contain a location name. + Location string + + // ResourceType represents the type of this resource ID. + ResourceType ResourceType + + // Name is the resource name of this resource ID. + Name string + + isChild bool + stringValue string +} + +// ParseResourceID parses a string to an instance of ResourceID +func ParseResourceID(id string) (*ResourceID, error) { + if len(id) == 0 { + return nil, fmt.Errorf("invalid resource ID: id cannot be empty") + } + + if !strings.HasPrefix(id, "/") { + return nil, fmt.Errorf("invalid resource ID: resource id '%s' must start with '/'", id) + } + + parts := splitStringAndOmitEmpty(id, "/") + + if len(parts) < 2 { + return nil, fmt.Errorf("invalid resource ID: %s", id) + } + + if !strings.EqualFold(parts[0], subscriptionsKey) && !strings.EqualFold(parts[0], providersKey) { + return nil, fmt.Errorf("invalid resource ID: %s", id) + } + + return appendNext(RootResourceID, parts, id) +} + +// String returns the string of the ResourceID +func (id *ResourceID) String() string { + if len(id.stringValue) > 0 { + return id.stringValue + } + + if id.Parent == nil { + return "" + } + + builder := strings.Builder{} + builder.WriteString(id.Parent.String()) + + if id.isChild { + builder.WriteString(fmt.Sprintf("/%s", id.ResourceType.lastType())) + if len(id.Name) > 0 { + builder.WriteString(fmt.Sprintf("/%s", id.Name)) + } + } else { + builder.WriteString(fmt.Sprintf("/providers/%s/%s/%s", id.ResourceType.Namespace, id.ResourceType.Type, id.Name)) + } + + id.stringValue = builder.String() + + return id.stringValue +} + +func newResourceID(parent *ResourceID, resourceTypeName string, resourceName string) *ResourceID { + id := &ResourceID{} + id.init(parent, chooseResourceType(resourceTypeName, parent), resourceName, true) + return id +} + +func newResourceIDWithResourceType(parent *ResourceID, resourceType ResourceType, resourceName string) *ResourceID { + id := &ResourceID{} + id.init(parent, resourceType, resourceName, true) + return id +} + +func newResourceIDWithProvider(parent *ResourceID, providerNamespace, resourceTypeName, resourceName string) *ResourceID { + id := &ResourceID{} + id.init(parent, NewResourceType(providerNamespace, resourceTypeName), resourceName, false) + return id +} + +func chooseResourceType(resourceTypeName string, parent *ResourceID) ResourceType { + if strings.EqualFold(resourceTypeName, resourceGroupsLowerKey) { + return ResourceGroupResourceType + } else if strings.EqualFold(resourceTypeName, subscriptionsKey) && parent != nil && parent.ResourceType.String() == TenantResourceType.String() { + return SubscriptionResourceType + } + + return parent.ResourceType.AppendChild(resourceTypeName) +} + +func (id *ResourceID) init(parent *ResourceID, resourceType ResourceType, name string, isChild bool) { + if parent != nil { + id.Provider = parent.Provider + id.SubscriptionID = 
parent.SubscriptionID + id.ResourceGroupName = parent.ResourceGroupName + id.Location = parent.Location + } + + if resourceType.String() == SubscriptionResourceType.String() { + id.SubscriptionID = name + } + + if resourceType.lastType() == locationsKey { + id.Location = name + } + + if resourceType.String() == ResourceGroupResourceType.String() { + id.ResourceGroupName = name + } + + if resourceType.String() == ProviderResourceType.String() { + id.Provider = name + } + + if parent == nil { + id.Parent = RootResourceID + } else { + id.Parent = parent + } + id.isChild = isChild + id.ResourceType = resourceType + id.Name = name +} + +func appendNext(parent *ResourceID, parts []string, id string) (*ResourceID, error) { + if len(parts) == 0 { + return parent, nil + } + + if len(parts) == 1 { + // subscriptions and resourceGroups are not valid ids without their names + if strings.EqualFold(parts[0], subscriptionsKey) || strings.EqualFold(parts[0], resourceGroupsLowerKey) { + return nil, fmt.Errorf("invalid resource ID: %s", id) + } + + // resourceGroup must contain either child or provider resource type + if parent.ResourceType.String() == ResourceGroupResourceType.String() { + return nil, fmt.Errorf("invalid resource ID: %s", id) + } + + return newResourceID(parent, parts[0], ""), nil + } + + if strings.EqualFold(parts[0], providersKey) && (len(parts) == 2 || strings.EqualFold(parts[2], providersKey)) { + //provider resource can only be on a tenant or a subscription parent + if parent.ResourceType.String() != SubscriptionResourceType.String() && parent.ResourceType.String() != TenantResourceType.String() { + return nil, fmt.Errorf("invalid resource ID: %s", id) + } + + return appendNext(newResourceIDWithResourceType(parent, ProviderResourceType, parts[1]), parts[2:], id) + } + + if len(parts) > 3 && strings.EqualFold(parts[0], providersKey) { + return appendNext(newResourceIDWithProvider(parent, parts[1], parts[2], parts[3]), parts[4:], id) + } + + if len(parts) > 1 && !strings.EqualFold(parts[0], providersKey) { + return appendNext(newResourceID(parent, parts[0], parts[1]), parts[2:], id) + } + + return nil, fmt.Errorf("invalid resource ID: %s", id) +} + +func splitStringAndOmitEmpty(v, sep string) []string { + r := make([]string, 0) + for _, s := range strings.Split(v, sep) { + if len(s) == 0 { + continue + } + r = append(r, s) + } + + return r +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_type.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_type.go new file mode 100644 index 0000000000000..ca03ac9713d54 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_type.go @@ -0,0 +1,114 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
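// A quick sketch of the resource-ID parsing implemented above. The parser lives
// in this internal package; code outside azcore is expected to reach it through
// the public github.com/Azure/azure-sdk-for-go/sdk/azcore/arm package, which
// re-exports ResourceID and ParseResourceID (an assumption about the public
// surface, not something shown in this diff).
package main

import (
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
)

func main() {
	id, err := arm.ParseResourceID("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myRg/providers/Microsoft.Network/virtualNetworks/vnet/subnets/mySubnet")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(id.SubscriptionID)         // 00000000-0000-0000-0000-000000000000
	fmt.Println(id.ResourceGroupName)      // myRg
	fmt.Println(id.ResourceType.Namespace) // Microsoft.Network
	fmt.Println(id.ResourceType.Type)      // virtualNetworks/subnets
	fmt.Println(id.Name)                   // mySubnet
	fmt.Println(id.Parent.Name)            // vnet
}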
+ +package resource + +import ( + "fmt" + "strings" +) + +// SubscriptionResourceType is the ResourceType of a subscription +var SubscriptionResourceType = NewResourceType(builtInResourceNamespace, "subscriptions") + +// ResourceGroupResourceType is the ResourceType of a resource group +var ResourceGroupResourceType = NewResourceType(builtInResourceNamespace, "resourceGroups") + +// TenantResourceType is the ResourceType of a tenant +var TenantResourceType = NewResourceType(builtInResourceNamespace, "tenants") + +// ProviderResourceType is the ResourceType of a provider +var ProviderResourceType = NewResourceType(builtInResourceNamespace, "providers") + +// ResourceType represents an Azure resource type, e.g. "Microsoft.Network/virtualNetworks/subnets". +// Don't create this type directly, use ParseResourceType or NewResourceType instead. +type ResourceType struct { + // Namespace is the namespace of the resource type. + // e.g. "Microsoft.Network" in resource type "Microsoft.Network/virtualNetworks/subnets" + Namespace string + + // Type is the full type name of the resource type. + // e.g. "virtualNetworks/subnets" in resource type "Microsoft.Network/virtualNetworks/subnets" + Type string + + // Types is the slice of all the sub-types of this resource type. + // e.g. ["virtualNetworks", "subnets"] in resource type "Microsoft.Network/virtualNetworks/subnets" + Types []string + + stringValue string +} + +// String returns the string of the ResourceType +func (t ResourceType) String() string { + return t.stringValue +} + +// IsParentOf returns true when the receiver is the parent resource type of the child. +func (t ResourceType) IsParentOf(child ResourceType) bool { + if !strings.EqualFold(t.Namespace, child.Namespace) { + return false + } + if len(t.Types) >= len(child.Types) { + return false + } + for i := range t.Types { + if !strings.EqualFold(t.Types[i], child.Types[i]) { + return false + } + } + + return true +} + +// AppendChild creates an instance of ResourceType using the receiver as the parent with childType appended to it. +func (t ResourceType) AppendChild(childType string) ResourceType { + return NewResourceType(t.Namespace, fmt.Sprintf("%s/%s", t.Type, childType)) +} + +// NewResourceType creates an instance of ResourceType using a provider namespace +// such as "Microsoft.Network" and type such as "virtualNetworks/subnets". +func NewResourceType(providerNamespace, typeName string) ResourceType { + return ResourceType{ + Namespace: providerNamespace, + Type: typeName, + Types: splitStringAndOmitEmpty(typeName, "/"), + stringValue: fmt.Sprintf("%s/%s", providerNamespace, typeName), + } +} + +// ParseResourceType parses the ResourceType from a resource type string (e.g. Microsoft.Network/virtualNetworks/subsets) +// or a resource identifier string. +// e.g. 
/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myRg/providers/Microsoft.Network/virtualNetworks/vnet/subnets/mySubnet) +func ParseResourceType(resourceIDOrType string) (ResourceType, error) { + // split the path into segments + parts := splitStringAndOmitEmpty(resourceIDOrType, "/") + + // There must be at least a namespace and type name + if len(parts) < 1 { + return ResourceType{}, fmt.Errorf("invalid resource ID or type: %s", resourceIDOrType) + } + + // if the type is just subscriptions, it is a built-in type in the Microsoft.Resources namespace + if len(parts) == 1 { + // Simple resource type + return NewResourceType(builtInResourceNamespace, parts[0]), nil + } else if strings.Contains(parts[0], ".") { + // Handle resource types (Microsoft.Compute/virtualMachines, Microsoft.Network/virtualNetworks/subnets) + // it is a full type name + return NewResourceType(parts[0], strings.Join(parts[1:], "/")), nil + } else { + // Check if ResourceID + id, err := ParseResourceID(resourceIDOrType) + if err != nil { + return ResourceType{}, err + } + return NewResourceType(id.ResourceType.Namespace, id.ResourceType.Type), nil + } +} + +func (t ResourceType) lastType() string { + return t.Types[len(t.Types)-1] +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy/policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy/policy.go new file mode 100644 index 0000000000000..83cf91e3ecb5b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy/policy.go @@ -0,0 +1,98 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package policy + +import ( + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +// BearerTokenOptions configures the bearer token policy's behavior. +type BearerTokenOptions struct { + // AuxiliaryTenants are additional tenant IDs for authenticating cross-tenant requests. + // The policy will add a token from each of these tenants to every request. The + // authenticating user or service principal must be a guest in these tenants, and the + // policy's credential must support multitenant authentication. + AuxiliaryTenants []string + + // Scopes contains the list of permission scopes required for the token. + Scopes []string +} + +// RegistrationOptions configures the registration policy's behavior. +// All zero-value fields will be initialized with their default values. +type RegistrationOptions struct { + policy.ClientOptions + + // MaxAttempts is the total number of times to attempt automatic registration + // in the event that an attempt fails. + // The default value is 3. + // Set to a value less than zero to disable the policy. + MaxAttempts int + + // PollingDelay is the amount of time to sleep between polling intervals. + // The default value is 15 seconds. + // A value less than zero means no delay between polling intervals (not recommended). + PollingDelay time.Duration + + // PollingDuration is the amount of time to wait before abandoning polling. + // The default valule is 5 minutes. + // NOTE: Setting this to a small value might cause the policy to prematurely fail. + PollingDuration time.Duration +} + +// ClientOptions contains configuration settings for a client's pipeline. +type ClientOptions struct { + policy.ClientOptions + + // AuxiliaryTenants are additional tenant IDs for authenticating cross-tenant requests. 
+ // The client will add a token from each of these tenants to every request. The + // authenticating user or service principal must be a guest in these tenants, and the + // client's credential must support multitenant authentication. + AuxiliaryTenants []string + + // DisableRPRegistration disables the auto-RP registration policy. Defaults to false. + DisableRPRegistration bool +} + +// Clone return a deep copy of the current options. +func (o *ClientOptions) Clone() *ClientOptions { + if o == nil { + return nil + } + copiedOptions := *o + copiedOptions.Cloud.Services = copyMap(copiedOptions.Cloud.Services) + copiedOptions.Logging.AllowedHeaders = copyArray(copiedOptions.Logging.AllowedHeaders) + copiedOptions.Logging.AllowedQueryParams = copyArray(copiedOptions.Logging.AllowedQueryParams) + copiedOptions.Retry.StatusCodes = copyArray(copiedOptions.Retry.StatusCodes) + copiedOptions.PerRetryPolicies = copyArray(copiedOptions.PerRetryPolicies) + copiedOptions.PerCallPolicies = copyArray(copiedOptions.PerCallPolicies) + return &copiedOptions +} + +// copyMap return a new map with all the key value pair in the src map +func copyMap[K comparable, V any](src map[K]V) map[K]V { + if src == nil { + return nil + } + copiedMap := make(map[K]V) + for k, v := range src { + copiedMap[k] = v + } + return copiedMap +} + +// copyMap return a new array with all the elements in the src array +func copyArray[T any](src []T) []T { + if src == nil { + return nil + } + copiedArray := make([]T, len(src)) + copy(copiedArray, src) + return copiedArray +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/pipeline.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/pipeline.go new file mode 100644 index 0000000000000..302c19cd4265f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/pipeline.go @@ -0,0 +1,65 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "errors" + "reflect" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + armpolicy "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + azpolicy "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + azruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" +) + +// NewPipeline creates a pipeline from connection options. Policies from ClientOptions are +// placed after policies from PipelineOptions. The telemetry policy, when enabled, will +// use the specified module and version info. 
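// A hedged sketch of wiring the pipeline constructed below: NewPipeline layers
// the ARM bearer-token and RP-registration policies on top of azcore's runtime
// pipeline, and the result can issue authorized ARM requests directly. The
// module name/version, the azidentity credential source and the request URL
// are placeholders, not values taken from this diff.
package main

import (
	"context"
	"log"
	"net/http"

	armpolicy "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy"
	armruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime"
	azruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	pl, err := armruntime.NewPipeline("example.module", "v0.1.0", cred, azruntime.PipelineOptions{}, &armpolicy.ClientOptions{})
	if err != nil {
		log.Fatal(err)
	}
	req, err := azruntime.NewRequest(context.Background(), http.MethodGet,
		"https://management.azure.com/subscriptions?api-version=2020-01-01")
	if err != nil {
		log.Fatal(err)
	}
	resp, err := pl.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println(resp.Status)
}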
+func NewPipeline(module, version string, cred azcore.TokenCredential, plOpts azruntime.PipelineOptions, options *armpolicy.ClientOptions) (azruntime.Pipeline, error) { + if options == nil { + options = &armpolicy.ClientOptions{} + } + conf, err := getConfiguration(&options.ClientOptions) + if err != nil { + return azruntime.Pipeline{}, err + } + authPolicy := NewBearerTokenPolicy(cred, &armpolicy.BearerTokenOptions{ + AuxiliaryTenants: options.AuxiliaryTenants, + Scopes: []string{conf.Audience + "/.default"}, + }) + perRetry := make([]azpolicy.Policy, len(plOpts.PerRetry), len(plOpts.PerRetry)+1) + copy(perRetry, plOpts.PerRetry) + plOpts.PerRetry = append(perRetry, authPolicy, exported.PolicyFunc(httpTraceNamespacePolicy)) + if !options.DisableRPRegistration { + regRPOpts := armpolicy.RegistrationOptions{ClientOptions: options.ClientOptions} + regPolicy, err := NewRPRegistrationPolicy(cred, ®RPOpts) + if err != nil { + return azruntime.Pipeline{}, err + } + perCall := make([]azpolicy.Policy, len(plOpts.PerCall), len(plOpts.PerCall)+1) + copy(perCall, plOpts.PerCall) + plOpts.PerCall = append(perCall, regPolicy) + } + if plOpts.APIVersion.Name == "" { + plOpts.APIVersion.Name = "api-version" + } + return azruntime.NewPipeline(module, version, plOpts, &options.ClientOptions), nil +} + +func getConfiguration(o *azpolicy.ClientOptions) (cloud.ServiceConfiguration, error) { + c := cloud.AzurePublic + if !reflect.ValueOf(o.Cloud).IsZero() { + c = o.Cloud + } + if conf, ok := c.Services[cloud.ResourceManager]; ok && conf.Endpoint != "" && conf.Audience != "" { + return conf, nil + } else { + return conf, errors.New("provided Cloud field is missing Azure Resource Manager configuration") + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_bearer_token.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_bearer_token.go new file mode 100644 index 0000000000000..54b3bb78d8595 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_bearer_token.go @@ -0,0 +1,145 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "context" + "encoding/base64" + "fmt" + "net/http" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + armpolicy "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + azpolicy "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + azruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/internal/temporal" +) + +const headerAuxiliaryAuthorization = "x-ms-authorization-auxiliary" + +// acquiringResourceState holds data for an auxiliary token request +type acquiringResourceState struct { + ctx context.Context + p *BearerTokenPolicy + tenant string +} + +// acquireAuxToken acquires a token from an auxiliary tenant. Only one thread/goroutine at a time ever calls this function. +func acquireAuxToken(state acquiringResourceState) (newResource azcore.AccessToken, newExpiration time.Time, err error) { + tk, err := state.p.cred.GetToken(state.ctx, azpolicy.TokenRequestOptions{ + EnableCAE: true, + Scopes: state.p.scopes, + TenantID: state.tenant, + }) + if err != nil { + return azcore.AccessToken{}, time.Time{}, err + } + return tk, tk.ExpiresOn, nil +} + +// BearerTokenPolicy authorizes requests with bearer tokens acquired from a TokenCredential. 
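// A minimal sketch of the cross-tenant behavior this policy adds: one bearer
// token for the primary tenant plus a token per auxiliary tenant, sent in the
// x-ms-authorization-auxiliary header. The scope and tenant ID are placeholders;
// any azcore.TokenCredential that supports multitenant authentication can
// serve as cred.
package sample

import (
	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	armpolicy "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy"
	armruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime"
	azpolicy "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
)

// newCrossTenantPolicy would typically be appended to runtime.PipelineOptions.PerRetry.
func newCrossTenantPolicy(cred azcore.TokenCredential) azpolicy.Policy {
	return armruntime.NewBearerTokenPolicy(cred, &armpolicy.BearerTokenOptions{
		Scopes:           []string{"https://management.azure.com/.default"},
		AuxiliaryTenants: []string{"aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee"},
	})
}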
+type BearerTokenPolicy struct { + auxResources map[string]*temporal.Resource[azcore.AccessToken, acquiringResourceState] + btp *azruntime.BearerTokenPolicy + cred azcore.TokenCredential + scopes []string +} + +// NewBearerTokenPolicy creates a policy object that authorizes requests with bearer tokens. +// cred: an azcore.TokenCredential implementation such as a credential object from azidentity +// opts: optional settings. Pass nil to accept default values; this is the same as passing a zero-value options. +func NewBearerTokenPolicy(cred azcore.TokenCredential, opts *armpolicy.BearerTokenOptions) *BearerTokenPolicy { + if opts == nil { + opts = &armpolicy.BearerTokenOptions{} + } + p := &BearerTokenPolicy{cred: cred} + p.auxResources = make(map[string]*temporal.Resource[azcore.AccessToken, acquiringResourceState], len(opts.AuxiliaryTenants)) + for _, t := range opts.AuxiliaryTenants { + p.auxResources[t] = temporal.NewResource(acquireAuxToken) + } + p.scopes = make([]string, len(opts.Scopes)) + copy(p.scopes, opts.Scopes) + p.btp = azruntime.NewBearerTokenPolicy(cred, opts.Scopes, &azpolicy.BearerTokenOptions{ + AuthorizationHandler: azpolicy.AuthorizationHandler{ + OnChallenge: p.onChallenge, + OnRequest: p.onRequest, + }, + }) + return p +} + +func (b *BearerTokenPolicy) onChallenge(req *azpolicy.Request, res *http.Response, authNZ func(azpolicy.TokenRequestOptions) error) error { + challenge := res.Header.Get(shared.HeaderWWWAuthenticate) + claims, err := parseChallenge(challenge) + if err != nil { + // the challenge contains claims we can't parse + return err + } else if claims != "" { + // request a new token having the specified claims, send the request again + return authNZ(azpolicy.TokenRequestOptions{Claims: claims, EnableCAE: true, Scopes: b.scopes}) + } + // auth challenge didn't include claims, so this is a simple authorization failure + return azruntime.NewResponseError(res) +} + +// onRequest authorizes requests with one or more bearer tokens +func (b *BearerTokenPolicy) onRequest(req *azpolicy.Request, authNZ func(azpolicy.TokenRequestOptions) error) error { + // authorize the request with a token for the primary tenant + err := authNZ(azpolicy.TokenRequestOptions{EnableCAE: true, Scopes: b.scopes}) + if err != nil || len(b.auxResources) == 0 { + return err + } + // add tokens for auxiliary tenants + as := acquiringResourceState{ + ctx: req.Raw().Context(), + p: b, + } + auxTokens := make([]string, 0, len(b.auxResources)) + for tenant, er := range b.auxResources { + as.tenant = tenant + auxTk, err := er.Get(as) + if err != nil { + return err + } + auxTokens = append(auxTokens, fmt.Sprintf("%s%s", shared.BearerTokenPrefix, auxTk.Token)) + } + req.Raw().Header.Set(headerAuxiliaryAuthorization, strings.Join(auxTokens, ", ")) + return nil +} + +// Do authorizes a request with a bearer token +func (b *BearerTokenPolicy) Do(req *azpolicy.Request) (*http.Response, error) { + return b.btp.Do(req) +} + +// parseChallenge parses claims from an authentication challenge issued by ARM so a client can request a token +// that will satisfy conditional access policies. It returns a non-nil error when the given value contains +// claims it can't parse. If the value contains no claims, it returns an empty string and a nil error. 
+func parseChallenge(wwwAuthenticate string) (string, error) { + claims := "" + var err error + for _, param := range strings.Split(wwwAuthenticate, ",") { + if _, after, found := strings.Cut(param, "claims="); found { + if claims != "" { + // The header contains multiple challenges, at least two of which specify claims. The specs allow this + // but it's unclear what a client should do in this case and there's as yet no concrete example of it. + err = fmt.Errorf("found multiple claims challenges in %q", wwwAuthenticate) + break + } + // trim stuff that would get an error from RawURLEncoding; claims may or may not be padded + claims = strings.Trim(after, `\"=`) + // we don't return this error because it's something unhelpful like "illegal base64 data at input byte 42" + if b, decErr := base64.RawURLEncoding.DecodeString(claims); decErr == nil { + claims = string(b) + } else { + err = fmt.Errorf("failed to parse claims from %q", wwwAuthenticate) + break + } + } + } + return claims, err +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_register_rp.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_register_rp.go new file mode 100644 index 0000000000000..83e15949aa36e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_register_rp.go @@ -0,0 +1,347 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "context" + "errors" + "fmt" + "net/http" + "net/url" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + armpolicy "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + azpolicy "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" +) + +const ( + // LogRPRegistration entries contain information specific to the automatic registration of an RP. + // Entries of this classification are written IFF the policy needs to take any action. + LogRPRegistration log.Event = "RPRegistration" +) + +// init sets any default values +func setDefaults(r *armpolicy.RegistrationOptions) { + if r.MaxAttempts == 0 { + r.MaxAttempts = 3 + } else if r.MaxAttempts < 0 { + r.MaxAttempts = 0 + } + if r.PollingDelay == 0 { + r.PollingDelay = 15 * time.Second + } else if r.PollingDelay < 0 { + r.PollingDelay = 0 + } + if r.PollingDuration == 0 { + r.PollingDuration = 5 * time.Minute + } +} + +// NewRPRegistrationPolicy creates a policy object configured using the specified options. +// The policy controls whether an unregistered resource provider should automatically be +// registered. See https://aka.ms/rps-not-found for more information. 
+func NewRPRegistrationPolicy(cred azcore.TokenCredential, o *armpolicy.RegistrationOptions) (azpolicy.Policy, error) { + if o == nil { + o = &armpolicy.RegistrationOptions{} + } + conf, err := getConfiguration(&o.ClientOptions) + if err != nil { + return nil, err + } + authPolicy := NewBearerTokenPolicy(cred, &armpolicy.BearerTokenOptions{Scopes: []string{conf.Audience + "/.default"}}) + p := &rpRegistrationPolicy{ + endpoint: conf.Endpoint, + pipeline: runtime.NewPipeline(shared.Module, shared.Version, runtime.PipelineOptions{PerRetry: []azpolicy.Policy{authPolicy}}, &o.ClientOptions), + options: *o, + } + // init the copy + setDefaults(&p.options) + return p, nil +} + +type rpRegistrationPolicy struct { + endpoint string + pipeline runtime.Pipeline + options armpolicy.RegistrationOptions +} + +func (r *rpRegistrationPolicy) Do(req *azpolicy.Request) (*http.Response, error) { + if r.options.MaxAttempts == 0 { + // policy is disabled + return req.Next() + } + const registeredState = "Registered" + var rp string + var resp *http.Response + for attempts := 0; attempts < r.options.MaxAttempts; attempts++ { + var err error + // make the original request + resp, err = req.Next() + // getting a 409 is the first indication that the RP might need to be registered, check error response + if err != nil || resp.StatusCode != http.StatusConflict { + return resp, err + } + var reqErr requestError + if err = runtime.UnmarshalAsJSON(resp, &reqErr); err != nil { + return resp, err + } + if reqErr.ServiceError == nil { + // missing service error info. just return the response + // to the caller so its error unmarshalling will kick in + return resp, err + } + if !isUnregisteredRPCode(reqErr.ServiceError.Code) { + // not a 409 due to unregistered RP. just return the response + // to the caller so its error unmarshalling will kick in + return resp, err + } + // RP needs to be registered. 
start by getting the subscription ID from the original request + subID, err := getSubscription(req.Raw().URL.Path) + if err != nil { + return resp, err + } + // now get the RP from the error + rp, err = getProvider(reqErr) + if err != nil { + return resp, err + } + logRegistrationExit := func(v interface{}) { + log.Writef(LogRPRegistration, "END registration for %s: %v", rp, v) + } + log.Writef(LogRPRegistration, "BEGIN registration for %s", rp) + // create client and make the registration request + // we use the scheme and host from the original request + rpOps := &providersOperations{ + p: r.pipeline, + u: r.endpoint, + subID: subID, + } + if _, err = rpOps.Register(&shared.ContextWithDeniedValues{Context: req.Raw().Context()}, rp); err != nil { + logRegistrationExit(err) + return resp, err + } + + // RP was registered, however we need to wait for the registration to complete + pollCtx, pollCancel := context.WithTimeout(&shared.ContextWithDeniedValues{Context: req.Raw().Context()}, r.options.PollingDuration) + var lastRegState string + for { + // get the current registration state + getResp, err := rpOps.Get(pollCtx, rp) + if err != nil { + pollCancel() + logRegistrationExit(err) + return resp, err + } + if getResp.Provider.RegistrationState != nil && !strings.EqualFold(*getResp.Provider.RegistrationState, lastRegState) { + // registration state has changed, or was updated for the first time + lastRegState = *getResp.Provider.RegistrationState + log.Writef(LogRPRegistration, "registration state is %s", lastRegState) + } + if strings.EqualFold(lastRegState, registeredState) { + // registration complete + pollCancel() + logRegistrationExit(lastRegState) + break + } + // wait before trying again + select { + case <-time.After(r.options.PollingDelay): + // continue polling + case <-pollCtx.Done(): + pollCancel() + logRegistrationExit(pollCtx.Err()) + return resp, pollCtx.Err() + } + } + // RP was successfully registered, retry the original request + err = req.RewindBody() + if err != nil { + return resp, err + } + } + // if we get here it means we exceeded the number of attempts + return resp, fmt.Errorf("exceeded attempts to register %s", rp) +} + +var unregisteredRPCodes = []string{ + "MissingSubscriptionRegistration", + "MissingRegistrationForResourceProvider", + "Subscription Not Registered", + "SubscriptionNotRegistered", +} + +func isUnregisteredRPCode(errorCode string) bool { + for _, code := range unregisteredRPCodes { + if strings.EqualFold(errorCode, code) { + return true + } + } + return false +} + +func getSubscription(path string) (string, error) { + parts := strings.Split(path, "/") + for i, v := range parts { + if v == "subscriptions" && (i+1) < len(parts) { + return parts[i+1], nil + } + } + return "", fmt.Errorf("failed to obtain subscription ID from %s", path) +} + +func getProvider(re requestError) (string, error) { + if len(re.ServiceError.Details) > 0 { + return re.ServiceError.Details[0].Target, nil + } + return "", errors.New("unexpected empty Details") +} + +// minimal error definitions to simplify detection +type requestError struct { + ServiceError *serviceError `json:"error"` +} + +type serviceError struct { + Code string `json:"code"` + Details []serviceErrorDetails `json:"details"` +} + +type serviceErrorDetails struct { + Code string `json:"code"` + Target string `json:"target"` +} + +/////////////////////////////////////////////////////////////////////////////////////////////// +// the following code was copied from module armresources, providers.go and 
models.go +// only the minimum amount of code was copied to get this working and some edits were made. +/////////////////////////////////////////////////////////////////////////////////////////////// + +type providersOperations struct { + p runtime.Pipeline + u string + subID string +} + +// Get - Gets the specified resource provider. +func (client *providersOperations) Get(ctx context.Context, resourceProviderNamespace string) (providerResponse, error) { + req, err := client.getCreateRequest(ctx, resourceProviderNamespace) + if err != nil { + return providerResponse{}, err + } + resp, err := client.p.Do(req) + if err != nil { + return providerResponse{}, err + } + result, err := client.getHandleResponse(resp) + if err != nil { + return providerResponse{}, err + } + return result, nil +} + +// getCreateRequest creates the Get request. +func (client *providersOperations) getCreateRequest(ctx context.Context, resourceProviderNamespace string) (*azpolicy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}" + urlPath = strings.ReplaceAll(urlPath, "{resourceProviderNamespace}", url.PathEscape(resourceProviderNamespace)) + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.u, urlPath)) + if err != nil { + return nil, err + } + query := req.Raw().URL.Query() + query.Set("api-version", "2019-05-01") + req.Raw().URL.RawQuery = query.Encode() + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *providersOperations) getHandleResponse(resp *http.Response) (providerResponse, error) { + if !runtime.HasStatusCode(resp, http.StatusOK) { + return providerResponse{}, exported.NewResponseError(resp) + } + result := providerResponse{RawResponse: resp} + err := runtime.UnmarshalAsJSON(resp, &result.Provider) + if err != nil { + return providerResponse{}, err + } + return result, err +} + +// Register - Registers a subscription with a resource provider. +func (client *providersOperations) Register(ctx context.Context, resourceProviderNamespace string) (providerResponse, error) { + req, err := client.registerCreateRequest(ctx, resourceProviderNamespace) + if err != nil { + return providerResponse{}, err + } + resp, err := client.p.Do(req) + if err != nil { + return providerResponse{}, err + } + result, err := client.registerHandleResponse(resp) + if err != nil { + return providerResponse{}, err + } + return result, nil +} + +// registerCreateRequest creates the Register request. +func (client *providersOperations) registerCreateRequest(ctx context.Context, resourceProviderNamespace string) (*azpolicy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/register" + urlPath = strings.ReplaceAll(urlPath, "{resourceProviderNamespace}", url.PathEscape(resourceProviderNamespace)) + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subID)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.u, urlPath)) + if err != nil { + return nil, err + } + query := req.Raw().URL.Query() + query.Set("api-version", "2019-05-01") + req.Raw().URL.RawQuery = query.Encode() + return req, nil +} + +// registerHandleResponse handles the Register response. 
+func (client *providersOperations) registerHandleResponse(resp *http.Response) (providerResponse, error) { + if !runtime.HasStatusCode(resp, http.StatusOK) { + return providerResponse{}, exported.NewResponseError(resp) + } + result := providerResponse{RawResponse: resp} + err := runtime.UnmarshalAsJSON(resp, &result.Provider) + if err != nil { + return providerResponse{}, err + } + return result, err +} + +// ProviderResponse is the response envelope for operations that return a Provider type. +type providerResponse struct { + // Resource provider information. + Provider *provider + + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// Provider - Resource provider information. +type provider struct { + // The provider ID. + ID *string `json:"id,omitempty"` + + // The namespace of the resource provider. + Namespace *string `json:"namespace,omitempty"` + + // The registration policy of the resource provider. + RegistrationPolicy *string `json:"registrationPolicy,omitempty"` + + // The registration state of the resource provider. + RegistrationState *string `json:"registrationState,omitempty"` +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_trace_namespace.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_trace_namespace.go new file mode 100644 index 0000000000000..6cea184240f28 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_trace_namespace.go @@ -0,0 +1,30 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" +) + +// httpTraceNamespacePolicy is a policy that adds the az.namespace attribute to the current Span +func httpTraceNamespacePolicy(req *policy.Request) (resp *http.Response, err error) { + rawTracer := req.Raw().Context().Value(shared.CtxWithTracingTracer{}) + if tracer, ok := rawTracer.(tracing.Tracer); ok && tracer.Enabled() { + rt, err := resource.ParseResourceType(req.Raw().URL.Path) + if err == nil { + // add the namespace attribute to the current span + span := tracer.SpanFromContext(req.Raw().Context()) + span.SetAttributes(tracing.Attribute{Key: shared.TracingNamespaceAttrName, Value: rt.Namespace}) + } + } + return req.Next() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/runtime.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/runtime.go new file mode 100644 index 0000000000000..1400d43799f32 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/runtime.go @@ -0,0 +1,24 @@ +//go:build go1.16 +// +build go1.16 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package runtime + +import "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + +func init() { + cloud.AzureChina.Services[cloud.ResourceManager] = cloud.ServiceConfiguration{ + Audience: "https://management.core.chinacloudapi.cn", + Endpoint: "https://management.chinacloudapi.cn", + } + cloud.AzureGovernment.Services[cloud.ResourceManager] = cloud.ServiceConfiguration{ + Audience: "https://management.core.usgovcloudapi.net", + Endpoint: "https://management.usgovcloudapi.net", + } + cloud.AzurePublic.Services[cloud.ResourceManager] = cloud.ServiceConfiguration{ + Audience: "https://management.core.windows.net/", + Endpoint: "https://management.azure.com", + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go index 9fae2a9dc4b63..8eef8633a7e80 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go @@ -22,6 +22,24 @@ type AccessToken = exported.AccessToken // TokenCredential represents a credential capable of providing an OAuth token. type TokenCredential = exported.TokenCredential +// KeyCredential contains an authentication key used to authenticate to an Azure service. +type KeyCredential = exported.KeyCredential + +// NewKeyCredential creates a new instance of [KeyCredential] with the specified values. +// - key is the authentication key +func NewKeyCredential(key string) *KeyCredential { + return exported.NewKeyCredential(key) +} + +// SASCredential contains a shared access signature used to authenticate to an Azure service. +type SASCredential = exported.SASCredential + +// NewSASCredential creates a new instance of [SASCredential] with the specified values. +// - sas is the shared access signature +func NewSASCredential(sas string) *SASCredential { + return exported.NewSASCredential(sas) +} + // holds sentinel values used to send nulls var nullables map[reflect.Type]interface{} = map[reflect.Type]interface{}{} @@ -66,7 +84,9 @@ func IsNullValue[T any](v T) bool { return false } -// ClientOptions contains configuration settings for a client's pipeline. +// ClientOptions contains optional settings for a client's pipeline. +// Instances can be shared across calls to SDK client constructors when uniform configuration is desired. +// Zero-value fields will have their specified default values applied during use. type ClientOptions = policy.ClientOptions // Client is a basic HTTP client. It consists of a pipeline and tracing provider. @@ -75,22 +95,17 @@ type Client struct { tr tracing.Tracer // cached on the client to support shallow copying with new values - tp tracing.Provider - modVer string + tp tracing.Provider + modVer string + namespace string } // NewClient creates a new Client instance with the provided values. -// - clientName - the fully qualified name of the client ("module/package.Client"); this is used by the telemetry policy and tracing provider. -// if module and package are the same value, the "module/" prefix can be omitted. -// - moduleVersion - the semantic version of the containing module; used by the telemetry policy +// - moduleName - the fully qualified name of the module where the client is defined; used by the telemetry policy and tracing provider. +// - moduleVersion - the semantic version of the module; used by the telemetry policy and tracing provider. 
// - plOpts - pipeline configuration options; can be the zero-value // - options - optional client configurations; pass nil to accept the default values -func NewClient(clientName, moduleVersion string, plOpts runtime.PipelineOptions, options *ClientOptions) (*Client, error) { - mod, client, err := shared.ExtractModuleName(clientName) - if err != nil { - return nil, err - } - +func NewClient(moduleName, moduleVersion string, plOpts runtime.PipelineOptions, options *ClientOptions) (*Client, error) { if options == nil { options = &ClientOptions{} } @@ -101,15 +116,19 @@ func NewClient(clientName, moduleVersion string, plOpts runtime.PipelineOptions, } } - pl := runtime.NewPipeline(mod, moduleVersion, plOpts, options) + pl := runtime.NewPipeline(moduleName, moduleVersion, plOpts, options) - tr := options.TracingProvider.NewTracer(client, moduleVersion) + tr := options.TracingProvider.NewTracer(moduleName, moduleVersion) + if tr.Enabled() && plOpts.Tracing.Namespace != "" { + tr.SetAttributes(tracing.Attribute{Key: shared.TracingNamespaceAttrName, Value: plOpts.Tracing.Namespace}) + } return &Client{ - pl: pl, - tr: tr, - tp: options.TracingProvider, - modVer: moduleVersion, + pl: pl, + tr: tr, + tp: options.TracingProvider, + modVer: moduleVersion, + namespace: plOpts.Tracing.Namespace, }, nil } @@ -128,5 +147,8 @@ func (c *Client) Tracer() tracing.Tracer { // - clientName - the fully qualified name of the client ("package.Client"); this is used by the tracing provider when creating spans func (c *Client) WithClientName(clientName string) *Client { tr := c.tp.NewTracer(clientName, c.modVer) - return &Client{pl: c.pl, tr: tr, tp: c.tp, modVer: c.modVer} + if tr.Enabled() && c.namespace != "" { + tr.SetAttributes(tracing.Attribute{Key: shared.TracingNamespaceAttrName, Value: c.namespace}) + } + return &Client{pl: c.pl, tr: tr, tp: c.tp, modVer: c.modVer, namespace: c.namespace} } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/doc.go index 28c64678c769e..654a5f4043147 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/doc.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/doc.go @@ -253,5 +253,12 @@ When resuming a poller, no IO is performed, and zero-value arguments can be used Resume tokens are unique per service client and operation. Attempting to resume a poller for LRO BeginB() with a token from LRO BeginA() will result in an error. + +# Fakes + +The fake package contains types used for constructing in-memory fake servers used in unit tests. +This allows writing tests to cover various success/error conditions without the need for connecting to a live service. + +Please see https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/samples/fakes for details and examples on how to use fakes. */ package azcore diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go index a1236b3625267..f2b296b6dc7cf 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go @@ -8,8 +8,11 @@ package exported import ( "context" + "encoding/base64" + "fmt" "io" "net/http" + "sync/atomic" "time" ) @@ -51,6 +54,17 @@ type AccessToken struct { // TokenRequestOptions contain specific parameter that may be used by credentials types when attempting to get a token. 
// Exported as policy.TokenRequestOptions. type TokenRequestOptions struct { + // Claims are any additional claims required for the token to satisfy a conditional access policy, such as a + // service may return in a claims challenge following an authorization failure. If a service returned the + // claims value base64 encoded, it must be decoded before setting this field. + Claims string + + // EnableCAE indicates whether to enable Continuous Access Evaluation (CAE) for the requested token. When true, + // azidentity credentials request CAE tokens for resource APIs supporting CAE. Clients are responsible for + // handling CAE challenges. If a client that doesn't handle CAE challenges receives a CAE token, it may end up + // in a loop retrying an API call with a token that has been revoked due to CAE. + EnableCAE bool + // Scopes contains the list of permission scopes required for the token. Scopes []string @@ -65,3 +79,97 @@ type TokenCredential interface { // GetToken requests an access token for the specified set of scopes. GetToken(ctx context.Context, options TokenRequestOptions) (AccessToken, error) } + +// DecodeByteArray will base-64 decode the provided string into v. +// Exported as runtime.DecodeByteArray() +func DecodeByteArray(s string, v *[]byte, format Base64Encoding) error { + if len(s) == 0 { + return nil + } + payload := string(s) + if payload[0] == '"' { + // remove surrounding quotes + payload = payload[1 : len(payload)-1] + } + switch format { + case Base64StdFormat: + decoded, err := base64.StdEncoding.DecodeString(payload) + if err == nil { + *v = decoded + return nil + } + return err + case Base64URLFormat: + // use raw encoding as URL format should not contain any '=' characters + decoded, err := base64.RawURLEncoding.DecodeString(payload) + if err == nil { + *v = decoded + return nil + } + return err + default: + return fmt.Errorf("unrecognized byte array format: %d", format) + } +} + +// KeyCredential contains an authentication key used to authenticate to an Azure service. +// Exported as azcore.KeyCredential. +type KeyCredential struct { + cred *keyCredential +} + +// NewKeyCredential creates a new instance of [KeyCredential] with the specified values. +// - key is the authentication key +func NewKeyCredential(key string) *KeyCredential { + return &KeyCredential{cred: newKeyCredential(key)} +} + +// Update replaces the existing key with the specified value. +func (k *KeyCredential) Update(key string) { + k.cred.Update(key) +} + +// SASCredential contains a shared access signature used to authenticate to an Azure service. +// Exported as azcore.SASCredential. +type SASCredential struct { + cred *keyCredential +} + +// NewSASCredential creates a new instance of [SASCredential] with the specified values. +// - sas is the shared access signature +func NewSASCredential(sas string) *SASCredential { + return &SASCredential{cred: newKeyCredential(sas)} +} + +// Update replaces the existing shared access signature with the specified value. +func (k *SASCredential) Update(sas string) { + k.cred.Update(sas) +} + +// KeyCredentialGet returns the key for cred. +func KeyCredentialGet(cred *KeyCredential) string { + return cred.cred.Get() +} + +// SASCredentialGet returns the shared access sig for cred. 
+func SASCredentialGet(cred *SASCredential) string { + return cred.cred.Get() +} + +type keyCredential struct { + key atomic.Value // string +} + +func newKeyCredential(key string) *keyCredential { + keyCred := keyCredential{} + keyCred.key.Store(key) + return &keyCred +} + +func (k *keyCredential) Get() string { + return k.key.Load().(string) +} + +func (k *keyCredential) Update(key string) { + k.key.Store(key) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/pipeline.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/pipeline.go index c44efd6eff574..e45f831ed2a48 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/pipeline.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/pipeline.go @@ -8,10 +8,7 @@ package exported import ( "errors" - "fmt" "net/http" - - "golang.org/x/net/http/httpguts" ) // Policy represents an extensibility point for the Pipeline that can mutate the specified @@ -75,23 +72,6 @@ func (p Pipeline) Do(req *Request) (*http.Response, error) { if req == nil { return nil, errors.New("request cannot be nil") } - // check copied from Transport.roundTrip() - for k, vv := range req.Raw().Header { - if !httpguts.ValidHeaderFieldName(k) { - if req.Raw().Body != nil { - req.Raw().Body.Close() - } - return nil, fmt.Errorf("invalid header field name %q", k) - } - for _, v := range vv { - if !httpguts.ValidHeaderFieldValue(v) { - if req.Raw().Body != nil { - req.Raw().Body.Close() - } - return nil, fmt.Errorf("invalid header field value %q for key %v", v, k) - } - } - } req.policies = p.policies return req.Next() } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go index fa99d1b7ed1fd..8d1ae213c9587 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go @@ -8,6 +8,7 @@ package exported import ( "context" + "encoding/base64" "errors" "fmt" "io" @@ -18,6 +19,28 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" ) +// Base64Encoding is usesd to specify which base-64 encoder/decoder to use when +// encoding/decoding a slice of bytes to/from a string. +// Exported as runtime.Base64Encoding +type Base64Encoding int + +const ( + // Base64StdFormat uses base64.StdEncoding for encoding and decoding payloads. + Base64StdFormat Base64Encoding = 0 + + // Base64URLFormat uses base64.RawURLEncoding for encoding and decoding payloads. + Base64URLFormat Base64Encoding = 1 +) + +// EncodeByteArray will base-64 encode the byte slice v. +// Exported as runtime.EncodeByteArray() +func EncodeByteArray(v []byte, format Base64Encoding) string { + if format == Base64URLFormat { + return base64.RawURLEncoding.EncodeToString(v) + } + return base64.StdEncoding.EncodeToString(v) +} + // Request is an abstraction over the creation of an HTTP request as it passes through the pipeline. // Don't use this type directly, use NewRequest() instead. // Exported as policy.Request. @@ -102,46 +125,11 @@ func (req *Request) OperationValue(value interface{}) bool { // SetBody sets the specified ReadSeekCloser as the HTTP request body, and sets Content-Type and Content-Length // accordingly. If the ReadSeekCloser is nil or empty, Content-Length won't be set. If contentType is "", -// Content-Type won't be set. 
+// Content-Type won't be set, and if it was set, will be deleted. // Use streaming.NopCloser to turn an io.ReadSeeker into an io.ReadSeekCloser. func (req *Request) SetBody(body io.ReadSeekCloser, contentType string) error { - var err error - var size int64 - if body != nil { - size, err = body.Seek(0, io.SeekEnd) // Seek to the end to get the stream's size - if err != nil { - return err - } - } - if size == 0 { - // treat an empty stream the same as a nil one: assign req a nil body - body = nil - // RFC 9110 specifies a client shouldn't set Content-Length on a request containing no content - // (Del is a no-op when the header has no value) - req.req.Header.Del(shared.HeaderContentLength) - } else { - _, err = body.Seek(0, io.SeekStart) - if err != nil { - return err - } - req.req.Header.Set(shared.HeaderContentLength, strconv.FormatInt(size, 10)) - req.Raw().GetBody = func() (io.ReadCloser, error) { - _, err := body.Seek(0, io.SeekStart) // Seek back to the beginning of the stream - return body, err - } - } - // keep a copy of the body argument. this is to handle cases - // where req.Body is replaced, e.g. httputil.DumpRequest and friends. - req.body = body - req.req.Body = body - req.req.ContentLength = size - if contentType == "" { - // Del is a no-op when the header has no value - req.req.Header.Del(shared.HeaderContentType) - } else { - req.req.Header.Set(shared.HeaderContentType, contentType) - } - return nil + // clobber the existing Content-Type to preserve behavior + return SetBody(req, body, contentType, true) } // RewindBody seeks the request's Body stream back to the beginning so it can be resent when retrying an operation. @@ -170,6 +158,14 @@ func (req *Request) Clone(ctx context.Context) *Request { return &r2 } +// WithContext returns a shallow copy of the request with its context changed to ctx. +func (req *Request) WithContext(ctx context.Context) *Request { + r2 := new(Request) + *r2 = *req + r2.req = r2.req.WithContext(ctx) + return r2 +} + // not exported but dependent on Request // PolicyFunc is a type that implements the Policy interface. @@ -180,3 +176,48 @@ type PolicyFunc func(*Request) (*http.Response, error) func (pf PolicyFunc) Do(req *Request) (*http.Response, error) { return pf(req) } + +// SetBody sets the specified ReadSeekCloser as the HTTP request body, and sets Content-Type and Content-Length accordingly. 
+// - req is the request to modify +// - body is the request body; if nil or empty, Content-Length won't be set +// - contentType is the value for the Content-Type header; if empty, Content-Type will be deleted +// - clobberContentType when true, will overwrite the existing value of Content-Type with contentType +func SetBody(req *Request, body io.ReadSeekCloser, contentType string, clobberContentType bool) error { + var err error + var size int64 + if body != nil { + size, err = body.Seek(0, io.SeekEnd) // Seek to the end to get the stream's size + if err != nil { + return err + } + } + if size == 0 { + // treat an empty stream the same as a nil one: assign req a nil body + body = nil + // RFC 9110 specifies a client shouldn't set Content-Length on a request containing no content + // (Del is a no-op when the header has no value) + req.req.Header.Del(shared.HeaderContentLength) + } else { + _, err = body.Seek(0, io.SeekStart) + if err != nil { + return err + } + req.req.Header.Set(shared.HeaderContentLength, strconv.FormatInt(size, 10)) + req.Raw().GetBody = func() (io.ReadCloser, error) { + _, err := body.Seek(0, io.SeekStart) // Seek back to the beginning of the stream + return body, err + } + } + // keep a copy of the body argument. this is to handle cases + // where req.Body is replaced, e.g. httputil.DumpRequest and friends. + req.body = body + req.req.Body = body + req.req.ContentLength = size + if contentType == "" { + // Del is a no-op when the header has no value + req.req.Header.Del(shared.HeaderContentType) + } else if req.req.Header.Get(shared.HeaderContentType) == "" || clobberContentType { + req.req.Header.Set(shared.HeaderContentType, contentType) + } + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go index 7df2f88c1c1a4..f243552885d11 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go @@ -13,6 +13,7 @@ import ( "net/http" "regexp" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" "github.com/Azure/azure-sdk-for-go/sdk/internal/exported" ) @@ -25,7 +26,7 @@ func NewResponseError(resp *http.Response) error { } // prefer the error code in the response header - if ec := resp.Header.Get("x-ms-error-code"); ec != "" { + if ec := resp.Header.Get(shared.HeaderXMSErrorCode); ec != "" { respErr.ErrorCode = ec return respErr } @@ -112,33 +113,45 @@ type ResponseError struct { // Error implements the error interface for type ResponseError. // Note that the message contents are not contractual and can change over time. 
func (e *ResponseError) Error() string { + const separator = "--------------------------------------------------------------------------------" // write the request method and URL with response status code msg := &bytes.Buffer{} - fmt.Fprintf(msg, "%s %s://%s%s\n", e.RawResponse.Request.Method, e.RawResponse.Request.URL.Scheme, e.RawResponse.Request.URL.Host, e.RawResponse.Request.URL.Path) - fmt.Fprintln(msg, "--------------------------------------------------------------------------------") - fmt.Fprintf(msg, "RESPONSE %d: %s\n", e.RawResponse.StatusCode, e.RawResponse.Status) + if e.RawResponse != nil { + if e.RawResponse.Request != nil { + fmt.Fprintf(msg, "%s %s://%s%s\n", e.RawResponse.Request.Method, e.RawResponse.Request.URL.Scheme, e.RawResponse.Request.URL.Host, e.RawResponse.Request.URL.Path) + } else { + fmt.Fprintln(msg, "Request information not available") + } + fmt.Fprintln(msg, separator) + fmt.Fprintf(msg, "RESPONSE %d: %s\n", e.RawResponse.StatusCode, e.RawResponse.Status) + } else { + fmt.Fprintln(msg, "Missing RawResponse") + fmt.Fprintln(msg, separator) + } if e.ErrorCode != "" { fmt.Fprintf(msg, "ERROR CODE: %s\n", e.ErrorCode) } else { fmt.Fprintln(msg, "ERROR CODE UNAVAILABLE") } - fmt.Fprintln(msg, "--------------------------------------------------------------------------------") - body, err := exported.Payload(e.RawResponse, nil) - if err != nil { - // this really shouldn't fail at this point as the response - // body is already cached (it was read in NewResponseError) - fmt.Fprintf(msg, "Error reading response body: %v", err) - } else if len(body) > 0 { - if err := json.Indent(msg, body, "", " "); err != nil { - // failed to pretty-print so just dump it verbatim - fmt.Fprint(msg, string(body)) + if e.RawResponse != nil { + fmt.Fprintln(msg, separator) + body, err := exported.Payload(e.RawResponse, nil) + if err != nil { + // this really shouldn't fail at this point as the response + // body is already cached (it was read in NewResponseError) + fmt.Fprintf(msg, "Error reading response body: %v", err) + } else if len(body) > 0 { + if err := json.Indent(msg, body, "", " "); err != nil { + // failed to pretty-print so just dump it verbatim + fmt.Fprint(msg, string(body)) + } + // the standard library doesn't have a pretty-printer for XML + fmt.Fprintln(msg) + } else { + fmt.Fprintln(msg, "Response contained no body") } - // the standard library doesn't have a pretty-printer for XML - fmt.Fprintln(msg) - } else { - fmt.Fprintln(msg, "Response contained no body") } - fmt.Fprintln(msg, "--------------------------------------------------------------------------------") + fmt.Fprintln(msg, separator) return msg.String() } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake/fake.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake/fake.go new file mode 100644 index 0000000000000..25983471867b8 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake/fake.go @@ -0,0 +1,133 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package fake + +import ( + "context" + "errors" + "fmt" + "net/http" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/internal/poller" +) + +// Applicable returns true if the LRO is a fake. +func Applicable(resp *http.Response) bool { + return resp.Header.Get(shared.HeaderFakePollerStatus) != "" +} + +// CanResume returns true if the token can rehydrate this poller type. +func CanResume(token map[string]interface{}) bool { + _, ok := token["fakeURL"] + return ok +} + +// Poller is an LRO poller that uses the Core-Fake-Poller pattern. +type Poller[T any] struct { + pl exported.Pipeline + + resp *http.Response + + // The API name from CtxAPINameKey + APIName string `json:"apiName"` + + // The URL from Core-Fake-Poller header. + FakeURL string `json:"fakeURL"` + + // The LRO's current state. + FakeStatus string `json:"status"` +} + +// lroStatusURLSuffix is the URL path suffix for a faked LRO. +const lroStatusURLSuffix = "/get/fake/status" + +// New creates a new Poller from the provided initial response. +// Pass nil for response to create an empty Poller for rehydration. +func New[T any](pl exported.Pipeline, resp *http.Response) (*Poller[T], error) { + if resp == nil { + log.Write(log.EventLRO, "Resuming Core-Fake-Poller poller.") + return &Poller[T]{pl: pl}, nil + } + + log.Write(log.EventLRO, "Using Core-Fake-Poller poller.") + fakeStatus := resp.Header.Get(shared.HeaderFakePollerStatus) + if fakeStatus == "" { + return nil, errors.New("response is missing Fake-Poller-Status header") + } + + ctxVal := resp.Request.Context().Value(shared.CtxAPINameKey{}) + if ctxVal == nil { + return nil, errors.New("missing value for CtxAPINameKey") + } + + apiName, ok := ctxVal.(string) + if !ok { + return nil, fmt.Errorf("expected string for CtxAPINameKey, the type was %T", ctxVal) + } + + qp := "" + if resp.Request.URL.RawQuery != "" { + qp = "?" + resp.Request.URL.RawQuery + } + + p := &Poller[T]{ + pl: pl, + resp: resp, + APIName: apiName, + // NOTE: any changes to this path format MUST be reflected in SanitizePollerPath() + FakeURL: fmt.Sprintf("%s://%s%s%s%s", resp.Request.URL.Scheme, resp.Request.URL.Host, resp.Request.URL.Path, lroStatusURLSuffix, qp), + FakeStatus: fakeStatus, + } + return p, nil +} + +// Done returns true if the LRO is in a terminal state. +func (p *Poller[T]) Done() bool { + return poller.IsTerminalState(p.FakeStatus) +} + +// Poll retrieves the current state of the LRO. 
+func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { + ctx = context.WithValue(ctx, shared.CtxAPINameKey{}, p.APIName) + err := pollers.PollHelper(ctx, p.FakeURL, p.pl, func(resp *http.Response) (string, error) { + if !poller.StatusCodeValid(resp) { + p.resp = resp + return "", exported.NewResponseError(resp) + } + fakeStatus := resp.Header.Get(shared.HeaderFakePollerStatus) + if fakeStatus == "" { + return "", errors.New("response is missing Fake-Poller-Status header") + } + p.resp = resp + p.FakeStatus = fakeStatus + return p.FakeStatus, nil + }) + if err != nil { + return nil, err + } + return p.resp, nil +} + +func (p *Poller[T]) Result(ctx context.Context, out *T) error { + if p.resp.StatusCode == http.StatusNoContent { + return nil + } else if poller.Failed(p.FakeStatus) { + return exported.NewResponseError(p.resp) + } + + return pollers.ResultHelper(p.resp, poller.Failed(p.FakeStatus), out) +} + +// SanitizePollerPath removes any fake-appended suffix from a URL's path. +func SanitizePollerPath(path string) string { + return strings.TrimSuffix(path, lroStatusURLSuffix) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go index 53c8d353ad149..8f749f48d9bf4 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go @@ -7,8 +7,9 @@ package shared const ( - ContentTypeAppJSON = "application/json" - ContentTypeAppXML = "application/xml" + ContentTypeAppJSON = "application/json" + ContentTypeAppXML = "application/xml" + ContentTypeTextPlain = "text/plain" ) const ( @@ -17,20 +18,27 @@ const ( HeaderAzureAsync = "Azure-AsyncOperation" HeaderContentLength = "Content-Length" HeaderContentType = "Content-Type" + HeaderFakePollerStatus = "Fake-Poller-Status" HeaderLocation = "Location" HeaderOperationLocation = "Operation-Location" HeaderRetryAfter = "Retry-After" + HeaderRetryAfterMS = "Retry-After-Ms" HeaderUserAgent = "User-Agent" HeaderWWWAuthenticate = "WWW-Authenticate" HeaderXMSClientRequestID = "x-ms-client-request-id" + HeaderXMSRequestID = "x-ms-request-id" + HeaderXMSErrorCode = "x-ms-error-code" + HeaderXMSRetryAfterMS = "x-ms-retry-after-ms" ) const BearerTokenPrefix = "Bearer " +const TracingNamespaceAttrName = "az.namespace" + const ( // Module is the name of the calling module used in telemetry data. Module = "azcore" // Version is the semantic version (see http://semver.org) of this module. - Version = "v1.7.0" + Version = "v1.9.2" ) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go index db0aaa7cb9562..d3da2c5fdfa34 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go @@ -16,14 +16,23 @@ import ( "time" ) +// NOTE: when adding a new context key type, it likely needs to be +// added to the deny-list of key types in ContextWithDeniedValues + // CtxWithHTTPHeaderKey is used as a context key for adding/retrieving http.Header. type CtxWithHTTPHeaderKey struct{} // CtxWithRetryOptionsKey is used as a context key for adding/retrieving RetryOptions. type CtxWithRetryOptionsKey struct{} -// CtxIncludeResponseKey is used as a context key for retrieving the raw response. 
-type CtxIncludeResponseKey struct{} +// CtxWithCaptureResponse is used as a context key for retrieving the raw response. +type CtxWithCaptureResponse struct{} + +// CtxWithTracingTracer is used as a context key for adding/retrieving tracing.Tracer. +type CtxWithTracingTracer struct{} + +// CtxAPINameKey is used as a context key for adding/retrieving the API name. +type CtxAPINameKey struct{} // Delay waits for the duration to elapse or the context to be cancelled. func Delay(ctx context.Context, delay time.Duration) error { @@ -35,22 +44,64 @@ func Delay(ctx context.Context, delay time.Duration) error { } } -// RetryAfter returns non-zero if the response contains a Retry-After header value. +// RetryAfter returns non-zero if the response contains one of the headers with a "retry after" value. +// Headers are checked in the following order: retry-after-ms, x-ms-retry-after-ms, retry-after func RetryAfter(resp *http.Response) time.Duration { if resp == nil { return 0 } - ra := resp.Header.Get(HeaderRetryAfter) - if ra == "" { - return 0 + + type retryData struct { + header string + units time.Duration + + // custom is used when the regular algorithm failed and is optional. + // the returned duration is used verbatim (units is not applied). + custom func(string) time.Duration } - // retry-after values are expressed in either number of - // seconds or an HTTP-date indicating when to try again - if retryAfter, _ := strconv.Atoi(ra); retryAfter > 0 { - return time.Duration(retryAfter) * time.Second - } else if t, err := time.Parse(time.RFC1123, ra); err == nil { - return time.Until(t) + + nop := func(string) time.Duration { return 0 } + + // the headers are listed in order of preference + retries := []retryData{ + { + header: HeaderRetryAfterMS, + units: time.Millisecond, + custom: nop, + }, + { + header: HeaderXMSRetryAfterMS, + units: time.Millisecond, + custom: nop, + }, + { + header: HeaderRetryAfter, + units: time.Second, + + // retry-after values are expressed in either number of + // seconds or an HTTP-date indicating when to try again + custom: func(ra string) time.Duration { + t, err := time.Parse(time.RFC1123, ra) + if err != nil { + return 0 + } + return time.Until(t) + }, + }, } + + for _, retry := range retries { + v := resp.Header.Get(retry.header) + if v == "" { + continue + } + if retryAfter, _ := strconv.Atoi(v); retryAfter > 0 { + return time.Duration(retryAfter) * retry.units + } else if d := retry.custom(v); d > 0 { + return d + } + } + return 0 } @@ -78,26 +129,21 @@ func ValidateModVer(moduleVersion string) error { return nil } -// ExtractModuleName returns "module", "package.Client" from "module/package.Client" or -// "package", "package.Client" from "package.Client" when there's no "module/" prefix. -// If clientName is malformed, an error is returned. -func ExtractModuleName(clientName string) (string, string, error) { - // uses unnamed capturing for "module", "package.Client", and "package" - regex, err := regexp.Compile(`^(?:([a-z0-9]+)/)?(([a-z0-9]+)\.(?:[A-Za-z0-9]+))$`) - if err != nil { - return "", "", err - } - - matches := regex.FindStringSubmatch(clientName) - if len(matches) < 4 { - return "", "", fmt.Errorf("malformed clientName %s", clientName) - } +// ContextWithDeniedValues wraps an existing [context.Context], denying access to certain context values. +// Pipeline policies that create new requests to be sent down their own pipeline MUST wrap the caller's +// context with an instance of this type. 
This is to prevent context values from flowing across disjoint +// requests which can have unintended side-effects. +type ContextWithDeniedValues struct { + context.Context +} - // the first match is the entire string, the second is "module", the third is - // "package.Client" and the fourth is "package". - // if there was no "module/" prefix, the second match will be the empty string - if matches[1] != "" { - return matches[1], matches[2], nil +// Value implements part of the [context.Context] interface. +// It acts as a deny-list for certain context keys. +func (c *ContextWithDeniedValues) Value(key any) any { + switch key.(type) { + case CtxAPINameKey, CtxWithCaptureResponse, CtxWithHTTPHeaderKey, CtxWithRetryOptionsKey, CtxWithTracingTracer: + return nil + default: + return c.Context.Value(key) } - return matches[3], matches[2], nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go index b200047834cea..d934f1dc5fa3e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go @@ -7,11 +7,13 @@ package policy import ( + "context" "net/http" "time" "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" ) @@ -27,9 +29,11 @@ type Transporter = exported.Transporter type Request = exported.Request // ClientOptions contains optional settings for a client's pipeline. -// All zero-value fields will be initialized with default values. +// Instances can be shared across calls to SDK client constructors when uniform configuration is desired. +// Zero-value fields will have their specified default values applied during use. type ClientOptions struct { - // APIVersion overrides the default version requested of the service. Set with caution as this package version has not been tested with arbitrary service versions. + // APIVersion overrides the default version requested of the service. + // Set with caution as this package version has not been tested with arbitrary service versions. APIVersion string // Cloud specifies a cloud for the client. The default is Azure Public Cloud. @@ -162,3 +166,22 @@ type AuthorizationHandler struct { // the policy will return any 401 response to the client. OnChallenge func(*Request, *http.Response, func(TokenRequestOptions) error) error } + +// WithCaptureResponse applies the HTTP response retrieval annotation to the parent context. +// The resp parameter will contain the HTTP response after the request has completed. +func WithCaptureResponse(parent context.Context, resp **http.Response) context.Context { + return context.WithValue(parent, shared.CtxWithCaptureResponse{}, resp) +} + +// WithHTTPHeader adds the specified http.Header to the parent context. +// Use this to specify custom HTTP headers at the API-call level. +// Any overlapping headers will have their values replaced with the values specified here. +func WithHTTPHeader(parent context.Context, header http.Header) context.Context { + return context.WithValue(parent, shared.CtxWithHTTPHeaderKey{}, header) +} + +// WithRetryOptions adds the specified RetryOptions to the parent context. +// Use this to specify custom RetryOptions at the API-call level. 
+func WithRetryOptions(parent context.Context, options RetryOptions) context.Context { + return context.WithValue(parent, shared.CtxWithRetryOptionsKey{}, options) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go index 5507665d651db..cffe692d7e30d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go @@ -10,6 +10,12 @@ import ( "context" "encoding/json" "errors" + "fmt" + "net/http" + "reflect" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" ) // PagingHandler contains the required data for constructing a Pager. @@ -20,12 +26,16 @@ type PagingHandler[T any] struct { // Fetcher fetches the first and subsequent pages. Fetcher func(context.Context, *T) (T, error) + + // Tracer contains the Tracer from the client that's creating the Pager. + Tracer tracing.Tracer } // Pager provides operations for iterating over paged responses. type Pager[T any] struct { current *T handler PagingHandler[T] + tracer tracing.Tracer firstPage bool } @@ -34,6 +44,7 @@ type Pager[T any] struct { func NewPager[T any](handler PagingHandler[T]) *Pager[T] { return &Pager[T]{ handler: handler, + tracer: handler.Tracer, firstPage: true, } } @@ -48,8 +59,6 @@ func (p *Pager[T]) More() bool { // NextPage advances the pager to the next page. func (p *Pager[T]) NextPage(ctx context.Context) (T, error) { - var resp T - var err error if p.current != nil { if p.firstPage { // we get here if it's an LRO-pager, we already have the first page @@ -58,12 +67,16 @@ func (p *Pager[T]) NextPage(ctx context.Context) (T, error) { } else if !p.handler.More(*p.current) { return *new(T), errors.New("no more pages") } - resp, err = p.handler.Fetcher(ctx, p.current) } else { // non-LRO case, first page p.firstPage = false - resp, err = p.handler.Fetcher(ctx, nil) } + + var err error + ctx, endSpan := StartSpan(ctx, fmt.Sprintf("%s.NextPage", shortenTypeName(reflect.TypeOf(*p).Name())), p.tracer, nil) + defer func() { endSpan(err) }() + + resp, err := p.handler.Fetcher(ctx, p.current) if err != nil { return *new(T), err } @@ -75,3 +88,41 @@ func (p *Pager[T]) NextPage(ctx context.Context) (T, error) { func (p *Pager[T]) UnmarshalJSON(data []byte) error { return json.Unmarshal(data, &p.current) } + +// FetcherForNextLinkOptions contains the optional values for [FetcherForNextLink]. +type FetcherForNextLinkOptions struct { + // NextReq is the func to be called when requesting subsequent pages. + // Used for paged operations that have a custom next link operation. + NextReq func(context.Context, string) (*policy.Request, error) +} + +// FetcherForNextLink is a helper containing boilerplate code to simplify creating a PagingHandler[T].Fetcher from a next link URL. +// - ctx is the [context.Context] controlling the lifetime of the HTTP operation +// - pl is the [Pipeline] used to dispatch the HTTP request +// - nextLink is the URL used to fetch the next page. 
the empty string indicates the first page is to be requested +// - firstReq is the func to be called when creating the request for the first page +// - options contains any optional parameters, pass nil to accept the default values +func FetcherForNextLink(ctx context.Context, pl Pipeline, nextLink string, firstReq func(context.Context) (*policy.Request, error), options *FetcherForNextLinkOptions) (*http.Response, error) { + var req *policy.Request + var err error + if nextLink == "" { + req, err = firstReq(ctx) + } else if nextLink, err = EncodeQueryParams(nextLink); err == nil { + if options != nil && options.NextReq != nil { + req, err = options.NextReq(ctx, nextLink) + } else { + req, err = NewRequest(ctx, http.MethodGet, nextLink) + } + } + if err != nil { + return nil, err + } + resp, err := pl.Do(req) + if err != nil { + return nil, err + } + if !HasStatusCode(resp, http.StatusOK) { + return nil, NewResponseError(resp) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go index 9d9288f53d3dd..6b1f5c083eb62 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go @@ -13,9 +13,35 @@ import ( // PipelineOptions contains Pipeline options for SDK developers type PipelineOptions struct { - AllowedHeaders, AllowedQueryParameters []string - APIVersion APIVersionOptions - PerCall, PerRetry []policy.Policy + // AllowedHeaders is the slice of headers to log with their values intact. + // All headers not in the slice will have their values REDACTED. + // Applies to request and response headers. + AllowedHeaders []string + + // AllowedQueryParameters is the slice of query parameters to log with their values intact. + // All query parameters not in the slice will have their values REDACTED. + AllowedQueryParameters []string + + // APIVersion overrides the default version requested of the service. + // Set with caution as this package version has not been tested with arbitrary service versions. + APIVersion APIVersionOptions + + // PerCall contains custom policies to inject into the pipeline. + // Each policy is executed once per request. + PerCall []policy.Policy + + // PerRetry contains custom policies to inject into the pipeline. + // Each policy is executed once per request, and for each retry of that request. + PerRetry []policy.Policy + + // Tracing contains options used to configure distributed tracing. + Tracing TracingOptions +} + +// TracingOptions contains tracing options for SDK developers. +type TracingOptions struct { + // Namespace contains the value to use for the az.namespace span attribute. + Namespace string } // Pipeline represents a primitive for sending HTTP requests and receiving responses. @@ -56,8 +82,10 @@ func NewPipeline(module, version string, plOpts PipelineOptions, options *policy policies = append(policies, NewRetryPolicy(&cp.Retry)) policies = append(policies, plOpts.PerRetry...) policies = append(policies, cp.PerRetryPolicies...) 
+ policies = append(policies, exported.PolicyFunc(httpHeaderPolicy)) + policies = append(policies, newHTTPTracePolicy(cp.Logging.AllowedQueryParams)) policies = append(policies, NewLogPolicy(&cp.Logging)) - policies = append(policies, exported.PolicyFunc(httpHeaderPolicy), exported.PolicyFunc(bodyDownloadPolicy)) + policies = append(policies, exported.PolicyFunc(bodyDownloadPolicy)) transport := cp.Transport if transport == nil { transport = defaultHTTPClient diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go index b61e4c121f69a..f0f280355955a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go @@ -6,6 +6,7 @@ package runtime import ( "errors" "net/http" + "strings" "time" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" @@ -34,7 +35,7 @@ type acquiringResourceState struct { // acquire acquires or updates the resource; only one // thread/goroutine at a time ever calls this function func acquire(state acquiringResourceState) (newResource exported.AccessToken, newExpiration time.Time, err error) { - tk, err := state.p.cred.GetToken(state.req.Raw().Context(), state.tro) + tk, err := state.p.cred.GetToken(&shared.ContextWithDeniedValues{Context: state.req.Raw().Context()}, state.tro) if err != nil { return exported.AccessToken{}, time.Time{}, err } @@ -72,6 +73,17 @@ func (b *BearerTokenPolicy) authenticateAndAuthorize(req *policy.Request) func(p // Do authorizes a request with a bearer token func (b *BearerTokenPolicy) Do(req *policy.Request) (*http.Response, error) { + // skip adding the authorization header if no TokenCredential was provided. + // this prevents a panic that might be hard to diagnose and allows testing + // against http endpoints that don't require authentication. 
+ if b.cred == nil { + return req.Next() + } + + if err := checkHTTPSForAuth(req); err != nil { + return nil, err + } + var err error if b.authzHandler.OnRequest != nil { err = b.authzHandler.OnRequest(req, b.authenticateAndAuthorize(req)) @@ -79,7 +91,7 @@ func (b *BearerTokenPolicy) Do(req *policy.Request) (*http.Response, error) { err = b.authenticateAndAuthorize(req)(policy.TokenRequestOptions{Scopes: b.scopes}) } if err != nil { - return nil, ensureNonRetriable(err) + return nil, errorinfo.NonRetriableError(err) } res, err := req.Next() @@ -95,22 +107,15 @@ func (b *BearerTokenPolicy) Do(req *policy.Request) (*http.Response, error) { } } } - return res, ensureNonRetriable(err) -} - -func ensureNonRetriable(err error) error { - var nre errorinfo.NonRetriable - if err != nil && !errors.As(err, &nre) { - err = btpError{err} + if err != nil { + err = errorinfo.NonRetriableError(err) } - return err + return res, err } -// btpError is a wrapper that ensures RetryPolicy doesn't retry requests BearerTokenPolicy couldn't authorize -type btpError struct { - error +func checkHTTPSForAuth(req *policy.Request) error { + if strings.ToLower(req.Raw().URL.Scheme) != "https" { + return errorinfo.NonRetriableError(errors.New("authenticated requests are not permitted for non TLS protected (https) endpoints")) + } + return nil } - -func (btpError) NonRetriable() {} - -var _ errorinfo.NonRetriable = (*btpError)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_header.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_header.go index 770e0a2b6a64f..c230af0afa89e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_header.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_header.go @@ -34,6 +34,7 @@ func httpHeaderPolicy(req *policy.Request) (*http.Response, error) { // WithHTTPHeader adds the specified http.Header to the parent context. // Use this to specify custom HTTP headers at the API-call level. // Any overlapping headers will have their values replaced with the values specified here. +// Deprecated: use [policy.WithHTTPHeader] instead. func WithHTTPHeader(parent context.Context, header http.Header) context.Context { - return context.WithValue(parent, shared.CtxWithHTTPHeaderKey{}, header) + return policy.WithHTTPHeader(parent, header) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_trace.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_trace.go new file mode 100644 index 0000000000000..3df1c1218901b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_trace.go @@ -0,0 +1,143 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package runtime + +import ( + "context" + "errors" + "fmt" + "net/http" + "net/url" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" +) + +const ( + attrHTTPMethod = "http.method" + attrHTTPURL = "http.url" + attrHTTPUserAgent = "http.user_agent" + attrHTTPStatusCode = "http.status_code" + + attrAZClientReqID = "az.client_request_id" + attrAZServiceReqID = "az.service_request_id" + + attrNetPeerName = "net.peer.name" +) + +// newHTTPTracePolicy creates a new instance of the httpTracePolicy. +// - allowedQueryParams contains the user-specified query parameters that don't need to be redacted from the trace +func newHTTPTracePolicy(allowedQueryParams []string) exported.Policy { + return &httpTracePolicy{allowedQP: getAllowedQueryParams(allowedQueryParams)} +} + +// httpTracePolicy is a policy that creates a trace for the HTTP request and its response +type httpTracePolicy struct { + allowedQP map[string]struct{} +} + +// Do implements the pipeline.Policy interfaces for the httpTracePolicy type. +func (h *httpTracePolicy) Do(req *policy.Request) (resp *http.Response, err error) { + rawTracer := req.Raw().Context().Value(shared.CtxWithTracingTracer{}) + if tracer, ok := rawTracer.(tracing.Tracer); ok && tracer.Enabled() { + attributes := []tracing.Attribute{ + {Key: attrHTTPMethod, Value: req.Raw().Method}, + {Key: attrHTTPURL, Value: getSanitizedURL(*req.Raw().URL, h.allowedQP)}, + {Key: attrNetPeerName, Value: req.Raw().URL.Host}, + } + + if ua := req.Raw().Header.Get(shared.HeaderUserAgent); ua != "" { + attributes = append(attributes, tracing.Attribute{Key: attrHTTPUserAgent, Value: ua}) + } + if reqID := req.Raw().Header.Get(shared.HeaderXMSClientRequestID); reqID != "" { + attributes = append(attributes, tracing.Attribute{Key: attrAZClientReqID, Value: reqID}) + } + + ctx := req.Raw().Context() + ctx, span := tracer.Start(ctx, "HTTP "+req.Raw().Method, &tracing.SpanOptions{ + Kind: tracing.SpanKindClient, + Attributes: attributes, + }) + + defer func() { + if resp != nil { + span.SetAttributes(tracing.Attribute{Key: attrHTTPStatusCode, Value: resp.StatusCode}) + if resp.StatusCode > 399 { + span.SetStatus(tracing.SpanStatusError, resp.Status) + } + if reqID := resp.Header.Get(shared.HeaderXMSRequestID); reqID != "" { + span.SetAttributes(tracing.Attribute{Key: attrAZServiceReqID, Value: reqID}) + } + } else if err != nil { + var urlErr *url.Error + if errors.As(err, &urlErr) { + // calling *url.Error.Error() will include the unsanitized URL + // which we don't want. in addition, we already have the HTTP verb + // and sanitized URL in the trace so we aren't losing any info + err = urlErr.Err + } + span.SetStatus(tracing.SpanStatusError, err.Error()) + } + span.End() + }() + + req = req.WithContext(ctx) + } + resp, err = req.Next() + return +} + +// StartSpanOptions contains the optional values for StartSpan. +type StartSpanOptions struct { + // for future expansion +} + +// StartSpan starts a new tracing span. +// You must call the returned func to terminate the span. Pass the applicable error +// if the span will exit with an error condition. +// - ctx is the parent context of the newly created context +// - name is the name of the span. 
this is typically the fully qualified name of an API ("Client.Method") +// - tracer is the client's Tracer for creating spans +// - options contains optional values. pass nil to accept any default values +func StartSpan(ctx context.Context, name string, tracer tracing.Tracer, options *StartSpanOptions) (context.Context, func(error)) { + if !tracer.Enabled() { + return ctx, func(err error) {} + } + + // we MUST propagate the active tracer before returning so that the trace policy can access it + ctx = context.WithValue(ctx, shared.CtxWithTracingTracer{}, tracer) + + const newSpanKind = tracing.SpanKindInternal + if activeSpan := ctx.Value(ctxActiveSpan{}); activeSpan != nil { + // per the design guidelines, if a SDK method Foo() calls SDK method Bar(), + // then the span for Bar() must be suppressed. however, if Bar() makes a REST + // call, then Bar's HTTP span must be a child of Foo's span. + // however, there is an exception to this rule. if the SDK method Foo() is a + // messaging producer/consumer, and it takes a callback that's a SDK method + // Bar(), then the span for Bar() must _not_ be suppressed. + if kind := activeSpan.(tracing.SpanKind); kind == tracing.SpanKindClient || kind == tracing.SpanKindInternal { + return ctx, func(err error) {} + } + } + ctx, span := tracer.Start(ctx, name, &tracing.SpanOptions{ + Kind: newSpanKind, + }) + ctx = context.WithValue(ctx, ctxActiveSpan{}, newSpanKind) + return ctx, func(err error) { + if err != nil { + errType := strings.Replace(fmt.Sprintf("%T", err), "*exported.", "*azcore.", 1) + span.SetStatus(tracing.SpanStatusError, fmt.Sprintf("%s:\n%s", errType, err.Error())) + } + span.End() + } +} + +// ctxActiveSpan is used as a context key for indicating a SDK client span is in progress. +type ctxActiveSpan struct{} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_include_response.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_include_response.go index 4714baa30cd68..bb00f6c2fdb7b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_include_response.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_include_response.go @@ -20,7 +20,7 @@ func includeResponsePolicy(req *policy.Request) (*http.Response, error) { if resp == nil { return resp, err } - if httpOutRaw := req.Raw().Context().Value(shared.CtxIncludeResponseKey{}); httpOutRaw != nil { + if httpOutRaw := req.Raw().Context().Value(shared.CtxWithCaptureResponse{}); httpOutRaw != nil { httpOut := httpOutRaw.(**http.Response) *httpOut = resp } @@ -29,6 +29,7 @@ func includeResponsePolicy(req *policy.Request) (*http.Response, error) { // WithCaptureResponse applies the HTTP response retrieval annotation to the parent context. // The resp parameter will contain the HTTP response after the request has completed. +// Deprecated: use [policy.WithCaptureResponse] instead. func WithCaptureResponse(parent context.Context, resp **http.Response) context.Context { - return context.WithValue(parent, shared.CtxIncludeResponseKey{}, resp) + return policy.WithCaptureResponse(parent, resp) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_key_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_key_credential.go new file mode 100644 index 0000000000000..6f577fa7a9e43 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_key_credential.go @@ -0,0 +1,57 @@ +// Copyright (c) Microsoft Corporation. 
All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +// KeyCredentialPolicy authorizes requests with a [azcore.KeyCredential]. +type KeyCredentialPolicy struct { + cred *exported.KeyCredential + header string + prefix string +} + +// KeyCredentialPolicyOptions contains the optional values configuring [KeyCredentialPolicy]. +type KeyCredentialPolicyOptions struct { + // Prefix is used if the key requires a prefix before it's inserted into the HTTP request. + Prefix string +} + +// NewKeyCredentialPolicy creates a new instance of [KeyCredentialPolicy]. +// - cred is the [azcore.KeyCredential] used to authenticate with the service +// - header is the name of the HTTP request header in which the key is placed +// - options contains optional configuration, pass nil to accept the default values +func NewKeyCredentialPolicy(cred *exported.KeyCredential, header string, options *KeyCredentialPolicyOptions) *KeyCredentialPolicy { + if options == nil { + options = &KeyCredentialPolicyOptions{} + } + return &KeyCredentialPolicy{ + cred: cred, + header: header, + prefix: options.Prefix, + } +} + +// Do implementes the Do method on the [policy.Polilcy] interface. +func (k *KeyCredentialPolicy) Do(req *policy.Request) (*http.Response, error) { + // skip adding the authorization header if no KeyCredential was provided. + // this prevents a panic that might be hard to diagnose and allows testing + // against http endpoints that don't require authentication. + if k.cred != nil { + if err := checkHTTPSForAuth(req); err != nil { + return nil, err + } + val := exported.KeyCredentialGet(k.cred) + if k.prefix != "" { + val = k.prefix + val + } + req.Raw().Header.Add(k.header, val) + } + return req.Next() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go index 8514f57d5c2f1..f048d7fb53f5d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go @@ -191,7 +191,8 @@ func (p *logPolicy) writeHeader(b *bytes.Buffer, header http.Header) { } sort.Strings(keys) for _, k := range keys { - value := header.Get(k) + // don't use Get() as it will canonicalize k which might cause a mismatch + value := header[k][0] // redact all header values not in the allow-list if _, ok := p.allowedHeaders[strings.ToLower(k)]; !ok { value = redactedValue diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go index e0c5929f3b708..04d7bb4ecbc66 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go @@ -59,15 +59,7 @@ func setDefaults(o *policy.RetryOptions) { } func calcDelay(o policy.RetryOptions, try int32) time.Duration { // try is >=1; never 0 - pow := func(number int64, exponent int32) int64 { // pow is nested helper function - var result int64 = 1 - for n := int32(0); n < exponent; n++ { - result *= number - } - return result - } - - delay := time.Duration(pow(2, try)-1) * o.RetryDelay + delay := time.Duration((1< -1 { + mod = mod[i+1:] + } b.WriteString(formatTelemetry(mod, ver)) b.WriteRune(' ') 
b.WriteString(platformInfo) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go index 3d029a3d15b71..c373f68962e3c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go @@ -13,6 +13,8 @@ import ( "flag" "fmt" "net/http" + "reflect" + "strings" "time" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" @@ -20,9 +22,11 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" "github.com/Azure/azure-sdk-for-go/sdk/internal/poller" ) @@ -54,6 +58,9 @@ type NewPollerOptions[T any] struct { // Handler[T] contains a custom polling implementation. Handler PollingHandler[T] + + // Tracer contains the Tracer from the client that's creating the Poller. + Tracer tracing.Tracer } // NewPoller creates a Poller based on the provided initial response. @@ -70,6 +77,7 @@ func NewPoller[T any](resp *http.Response, pl exported.Pipeline, options *NewPol op: options.Handler, resp: resp, result: result, + tracer: options.Tracer, }, nil } @@ -83,7 +91,9 @@ func NewPoller[T any](resp *http.Response, pl exported.Pipeline, options *NewPol // determine the polling method var opr PollingHandler[T] var err error - if async.Applicable(resp) { + if fake.Applicable(resp) { + opr, err = fake.New[T](pl, resp) + } else if async.Applicable(resp) { // async poller must be checked first as it can also have a location header opr, err = async.New[T](pl, resp, options.FinalStateVia) } else if op.Applicable(resp) { @@ -110,6 +120,7 @@ func NewPoller[T any](resp *http.Response, pl exported.Pipeline, options *NewPol op: opr, resp: resp, result: result, + tracer: options.Tracer, }, nil } @@ -121,6 +132,9 @@ type NewPollerFromResumeTokenOptions[T any] struct { // Handler[T] contains a custom polling implementation. Handler PollingHandler[T] + + // Tracer contains the Tracer from the client that's creating the Poller. + Tracer tracing.Tracer } // NewPollerFromResumeToken creates a Poller from a resume token string. @@ -147,7 +161,9 @@ func NewPollerFromResumeToken[T any](token string, pl exported.Pipeline, options opr := options.Handler // now rehydrate the poller based on the encoded poller type - if opr != nil { + if fake.CanResume(asJSON) { + opr, _ = fake.New[T](pl, nil) + } else if opr != nil { log.Writef(log.EventLRO, "Resuming custom poller %T.", opr) } else if async.CanResume(asJSON) { opr, _ = async.New[T](pl, nil, "") @@ -166,6 +182,7 @@ func NewPollerFromResumeToken[T any](token string, pl exported.Pipeline, options return &Poller[T]{ op: opr, result: result, + tracer: options.Tracer, }, nil } @@ -188,6 +205,7 @@ type Poller[T any] struct { resp *http.Response err error result *T + tracer tracing.Tracer done bool } @@ -203,7 +221,7 @@ type PollUntilDoneOptions struct { // options: pass nil to accept the default values. // NOTE: the default polling frequency is 30 seconds which works well for most operations. 
However, some operations might // benefit from a shorter or longer duration. -func (p *Poller[T]) PollUntilDone(ctx context.Context, options *PollUntilDoneOptions) (T, error) { +func (p *Poller[T]) PollUntilDone(ctx context.Context, options *PollUntilDoneOptions) (res T, err error) { if options == nil { options = &PollUntilDoneOptions{} } @@ -212,9 +230,13 @@ func (p *Poller[T]) PollUntilDone(ctx context.Context, options *PollUntilDoneOpt cp.Frequency = 30 * time.Second } + ctx, endSpan := StartSpan(ctx, fmt.Sprintf("%s.PollUntilDone", shortenTypeName(reflect.TypeOf(*p).Name())), p.tracer, nil) + defer func() { endSpan(err) }() + // skip the floor check when executing tests so they don't take so long if isTest := flag.Lookup("test.v"); isTest == nil && cp.Frequency < time.Second { - return *new(T), errors.New("polling frequency minimum is one second") + err = errors.New("polling frequency minimum is one second") + return } start := time.Now() @@ -226,22 +248,24 @@ func (p *Poller[T]) PollUntilDone(ctx context.Context, options *PollUntilDoneOpt // initial check for a retry-after header existing on the initial response if retryAfter := shared.RetryAfter(p.resp); retryAfter > 0 { log.Writef(log.EventLRO, "initial Retry-After delay for %s", retryAfter.String()) - if err := shared.Delay(ctx, retryAfter); err != nil { + if err = shared.Delay(ctx, retryAfter); err != nil { logPollUntilDoneExit(err) - return *new(T), err + return } } } // begin polling the endpoint until a terminal state is reached for { - resp, err := p.Poll(ctx) + var resp *http.Response + resp, err = p.Poll(ctx) if err != nil { logPollUntilDoneExit(err) - return *new(T), err + return } if p.Done() { logPollUntilDoneExit("succeeded") - return p.Result(ctx) + res, err = p.Result(ctx) + return } d := cp.Frequency if retryAfter := shared.RetryAfter(resp); retryAfter > 0 { @@ -252,7 +276,7 @@ func (p *Poller[T]) PollUntilDone(ctx context.Context, options *PollUntilDoneOpt } if err = shared.Delay(ctx, d); err != nil { logPollUntilDoneExit(err) - return *new(T), err + return } } } @@ -261,17 +285,22 @@ func (p *Poller[T]) PollUntilDone(ctx context.Context, options *PollUntilDoneOpt // If Poll succeeds, the poller's state is updated and the HTTP response is returned. // If Poll fails, the poller's state is unmodified and the error is returned. // Calling Poll on an LRO that has reached a terminal state will return the last HTTP response. -func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { +func (p *Poller[T]) Poll(ctx context.Context) (resp *http.Response, err error) { if p.Done() { // the LRO has reached a terminal state, don't poll again - return p.resp, nil + resp = p.resp + return } - resp, err := p.op.Poll(ctx) + + ctx, endSpan := StartSpan(ctx, fmt.Sprintf("%s.Poll", shortenTypeName(reflect.TypeOf(*p).Name())), p.tracer, nil) + defer func() { endSpan(err) }() + + resp, err = p.op.Poll(ctx) if err != nil { - return nil, err + return } p.resp = resp - return p.resp, nil + return } // Done returns true if the LRO has reached a terminal state. @@ -284,31 +313,40 @@ func (p *Poller[T]) Done() bool { // If the LRO completed successfully, a populated instance of T is returned. // If the LRO failed or was canceled, an *azcore.ResponseError error is returned. // Calling this on an LRO in a non-terminal state will return an error. 
-func (p *Poller[T]) Result(ctx context.Context) (T, error) { +func (p *Poller[T]) Result(ctx context.Context) (res T, err error) { if !p.Done() { - return *new(T), errors.New("poller is in a non-terminal state") + err = errors.New("poller is in a non-terminal state") + return } if p.done { // the result has already been retrieved, return the cached value if p.err != nil { - return *new(T), p.err + err = p.err + return } - return *p.result, nil + res = *p.result + return } - err := p.op.Result(ctx, p.result) + + ctx, endSpan := StartSpan(ctx, fmt.Sprintf("%s.Result", shortenTypeName(reflect.TypeOf(*p).Name())), p.tracer, nil) + defer func() { endSpan(err) }() + + err = p.op.Result(ctx, p.result) var respErr *exported.ResponseError if errors.As(err, &respErr) { // the LRO failed. record the error p.err = err } else if err != nil { // the call to Result failed, don't cache anything in this case - return *new(T), err + return } p.done = true if p.err != nil { - return *new(T), p.err + err = p.err + return } - return *p.result, nil + res = *p.result + return } // ResumeToken returns a value representing the poller that can be used to resume @@ -325,3 +363,22 @@ func (p *Poller[T]) ResumeToken() (string, error) { } return tk, err } + +// extracts the type name from the string returned from reflect.Value.Name() +func shortenTypeName(s string) string { + // the value is formatted as follows + // Poller[module/Package.Type].Method + // we want to shorten the generic type parameter string to Type + // anything we don't recognize will be left as-is + begin := strings.Index(s, "[") + end := strings.Index(s, "]") + if begin == -1 || end == -1 { + return s + } + + typeName := s[begin+1 : end] + if i := strings.LastIndex(typeName, "."); i > -1 { + typeName = typeName[i+1:] + } + return s[:begin+1] + typeName + s[end:] +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go index 98e00718488e0..5d1569c8dd2b5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go @@ -9,17 +9,14 @@ package runtime import ( "bytes" "context" - "encoding/base64" "encoding/json" "encoding/xml" "fmt" "io" "mime/multipart" - "os" + "net/url" "path" - "reflect" "strings" - "time" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" @@ -28,14 +25,14 @@ import ( // Base64Encoding is usesd to specify which base-64 encoder/decoder to use when // encoding/decoding a slice of bytes to/from a string. -type Base64Encoding int +type Base64Encoding = exported.Base64Encoding const ( // Base64StdFormat uses base64.StdEncoding for encoding and decoding payloads. - Base64StdFormat Base64Encoding = 0 + Base64StdFormat Base64Encoding = exported.Base64StdFormat // Base64URLFormat uses base64.RawURLEncoding for encoding and decoding payloads. - Base64URLFormat Base64Encoding = 1 + Base64URLFormat Base64Encoding = exported.Base64URLFormat ) // NewRequest creates a new policy.Request with the specified input. @@ -44,6 +41,19 @@ func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*polic return exported.NewRequest(ctx, httpMethod, endpoint) } +// EncodeQueryParams will parse and encode any query parameters in the specified URL. 
+func EncodeQueryParams(u string) (string, error) { + before, after, found := strings.Cut(u, "?") + if !found { + return u, nil + } + qp, err := url.ParseQuery(after) + if err != nil { + return "", err + } + return before + "?" + qp.Encode(), nil +} + // JoinPaths concatenates multiple URL path segments into one path, // inserting path separation characters as required. JoinPaths will preserve // query parameters in the root path @@ -79,10 +89,7 @@ func JoinPaths(root string, paths ...string) string { // EncodeByteArray will base-64 encode the byte slice v. func EncodeByteArray(v []byte, format Base64Encoding) string { - if format == Base64URLFormat { - return base64.RawURLEncoding.EncodeToString(v) - } - return base64.StdEncoding.EncodeToString(v) + return exported.EncodeByteArray(v, format) } // MarshalAsByteArray will base-64 encode the byte slice v, then calls SetBody. @@ -90,19 +97,18 @@ func EncodeByteArray(v []byte, format Base64Encoding) string { func MarshalAsByteArray(req *policy.Request, v []byte, format Base64Encoding) error { // send as a JSON string encode := fmt.Sprintf("\"%s\"", EncodeByteArray(v, format)) - return req.SetBody(exported.NopCloser(strings.NewReader(encode)), shared.ContentTypeAppJSON) + // tsp generated code can set Content-Type so we must prefer that + return exported.SetBody(req, exported.NopCloser(strings.NewReader(encode)), shared.ContentTypeAppJSON, false) } // MarshalAsJSON calls json.Marshal() to get the JSON encoding of v then calls SetBody. func MarshalAsJSON(req *policy.Request, v interface{}) error { - if omit := os.Getenv("AZURE_SDK_GO_OMIT_READONLY"); omit == "true" { - v = cloneWithoutReadOnlyFields(v) - } b, err := json.Marshal(v) if err != nil { return fmt.Errorf("error marshalling type %T: %s", v, err) } - return req.SetBody(exported.NopCloser(bytes.NewReader(b)), shared.ContentTypeAppJSON) + // tsp generated code can set Content-Type so we must prefer that + return exported.SetBody(req, exported.NopCloser(bytes.NewReader(b)), shared.ContentTypeAppJSON, false) } // MarshalAsXML calls xml.Marshal() to get the XML encoding of v then calls SetBody. @@ -169,80 +175,5 @@ func SkipBodyDownload(req *policy.Request) { req.SetOperationValue(bodyDownloadPolicyOpValues{Skip: true}) } -// returns a clone of the object graph pointed to by v, omitting values of all read-only -// fields. if there are no read-only fields in the object graph, no clone is created. -func cloneWithoutReadOnlyFields(v interface{}) interface{} { - val := reflect.Indirect(reflect.ValueOf(v)) - if val.Kind() != reflect.Struct { - // not a struct, skip - return v - } - // first walk the graph to find any R/O fields. - // if there aren't any, skip cloning the graph. - if !recursiveFindReadOnlyField(val) { - return v - } - return recursiveCloneWithoutReadOnlyFields(val) -} - -// returns true if any field in the object graph of val contains the `azure:"ro"` tag value -func recursiveFindReadOnlyField(val reflect.Value) bool { - t := val.Type() - // iterate over the fields, looking for the "azure" tag. - for i := 0; i < t.NumField(); i++ { - field := t.Field(i) - aztag := field.Tag.Get("azure") - if azureTagIsReadOnly(aztag) { - return true - } else if reflect.Indirect(val.Field(i)).Kind() == reflect.Struct && recursiveFindReadOnlyField(reflect.Indirect(val.Field(i))) { - return true - } - } - return false -} - -// clones the object graph of val. 
all non-R/O properties are copied to the clone -func recursiveCloneWithoutReadOnlyFields(val reflect.Value) interface{} { - t := val.Type() - clone := reflect.New(t) - // iterate over the fields, looking for the "azure" tag. - for i := 0; i < t.NumField(); i++ { - field := t.Field(i) - aztag := field.Tag.Get("azure") - if azureTagIsReadOnly(aztag) { - // omit from payload - continue - } - // clone field will receive the same value as the source field... - value := val.Field(i) - v := reflect.Indirect(value) - if v.IsValid() && v.Type() != reflect.TypeOf(time.Time{}) && v.Kind() == reflect.Struct { - // ...unless the source value is a struct, in which case we recurse to clone that struct. - // (We can't recursively clone time.Time because it contains unexported fields.) - c := recursiveCloneWithoutReadOnlyFields(v) - if field.Anonymous { - // NOTE: this does not handle the case of embedded fields of unexported struct types. - // this should be ok as we don't generate any code like this at present - value = reflect.Indirect(reflect.ValueOf(c)) - } else { - value = reflect.ValueOf(c) - } - } - reflect.Indirect(clone).Field(i).Set(value) - } - return clone.Interface() -} - -// returns true if the "azure" tag contains the option "ro" -func azureTagIsReadOnly(tag string) bool { - if tag == "" { - return false - } - parts := strings.Split(tag, ",") - for _, part := range parts { - if part == "ro" { - return true - } - } - return false -} +// CtxAPINameKey is used as a context key for adding/retrieving the API name. +type CtxAPINameKey = shared.CtxAPINameKey diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go index d1f58e9e29597..003c875b1f56a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go @@ -8,13 +8,13 @@ package runtime import ( "bytes" - "encoding/base64" "encoding/json" "encoding/xml" "fmt" "io" "net/http" + azexported "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" "github.com/Azure/azure-sdk-for-go/sdk/internal/exported" ) @@ -105,31 +105,5 @@ func removeBOM(resp *http.Response) error { // DecodeByteArray will base-64 decode the provided string into v. func DecodeByteArray(s string, v *[]byte, format Base64Encoding) error { - if len(s) == 0 { - return nil - } - payload := string(s) - if payload[0] == '"' { - // remove surrounding quotes - payload = payload[1 : len(payload)-1] - } - switch format { - case Base64StdFormat: - decoded, err := base64.StdEncoding.DecodeString(payload) - if err == nil { - *v = decoded - return nil - } - return err - case Base64URLFormat: - // use raw encoding as URL format should not contain any '=' characters - decoded, err := base64.RawURLEncoding.DecodeString(payload) - if err == nil { - *v = decoded - return nil - } - return err - default: - return fmt.Errorf("unrecognized byte array format: %d", format) - } + return azexported.DecodeByteArray(s, v, format) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_dialer_other.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_dialer_other.go new file mode 100644 index 0000000000000..1c75d771f2e46 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_dialer_other.go @@ -0,0 +1,15 @@ +//go:build !wasm + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. + +package runtime + +import ( + "context" + "net" +) + +func defaultTransportDialContext(dialer *net.Dialer) func(context.Context, string, string) (net.Conn, error) { + return dialer.DialContext +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_dialer_wasm.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_dialer_wasm.go new file mode 100644 index 0000000000000..3dc9eeecddf62 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_dialer_wasm.go @@ -0,0 +1,15 @@ +//go:build (js && wasm) || wasip1 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "context" + "net" +) + +func defaultTransportDialContext(dialer *net.Dialer) func(context.Context, string, string) (net.Conn, error) { + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go index 869bed5118427..2124c1d48b9a2 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go @@ -11,6 +11,8 @@ import ( "net" "net/http" "time" + + "golang.org/x/net/http2" ) var defaultHTTPClient *http.Client @@ -18,19 +20,28 @@ var defaultHTTPClient *http.Client func init() { defaultTransport := &http.Transport{ Proxy: http.ProxyFromEnvironment, - DialContext: (&net.Dialer{ + DialContext: defaultTransportDialContext(&net.Dialer{ Timeout: 30 * time.Second, KeepAlive: 30 * time.Second, - }).DialContext, + }), ForceAttemptHTTP2: true, MaxIdleConns: 100, + MaxIdleConnsPerHost: 10, IdleConnTimeout: 90 * time.Second, TLSHandshakeTimeout: 10 * time.Second, ExpectContinueTimeout: 1 * time.Second, TLSClientConfig: &tls.Config{ - MinVersion: tls.VersionTLS12, + MinVersion: tls.VersionTLS12, + Renegotiation: tls.RenegotiateFreelyAsClient, }, } + // TODO: evaluate removing this once https://github.com/golang/go/issues/59690 has been fixed + if http2Transport, err := http2.ConfigureTransports(defaultTransport); err == nil { + // if the connection has been idle for 10 seconds, send a ping frame for a health check + http2Transport.ReadIdleTimeout = 10 * time.Second + // if there's no response to the ping within the timeout, the connection will be closed + http2Transport.PingTimeout = 5 * time.Second + } defaultHTTPClient = &http.Client{ Transport: defaultTransport, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/tracing.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/tracing.go index 75f757cedd3b6..1ade7c560ff10 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/tracing.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/tracing.go @@ -31,12 +31,12 @@ type Provider struct { newTracerFn func(name, version string) Tracer } -// NewTracer creates a new Tracer for the specified name and version. -// - name - the name of the tracer object, typically the fully qualified name of the service client -// - version - the version of the module in which the service client resides -func (p Provider) NewTracer(name, version string) (tracer Tracer) { +// NewTracer creates a new Tracer for the specified module name and version. 
+// - module - the fully qualified name of the module +// - version - the version of the module +func (p Provider) NewTracer(module, version string) (tracer Tracer) { if p.newTracerFn != nil { - tracer = p.newTracerFn(name, version) + tracer = p.newTracerFn(module, version) } return } @@ -45,21 +45,28 @@ func (p Provider) NewTracer(name, version string) (tracer Tracer) { // TracerOptions contains the optional values when creating a Tracer. type TracerOptions struct { - // for future expansion + // SpanFromContext contains the implementation for the Tracer.SpanFromContext method. + SpanFromContext func(context.Context) Span } // NewTracer creates a Tracer with the specified values. // - newSpanFn is the underlying implementation for creating Span instances // - options contains optional values; pass nil to accept the default value func NewTracer(newSpanFn func(ctx context.Context, spanName string, options *SpanOptions) (context.Context, Span), options *TracerOptions) Tracer { + if options == nil { + options = &TracerOptions{} + } return Tracer{ - newSpanFn: newSpanFn, + newSpanFn: newSpanFn, + spanFromContextFn: options.SpanFromContext, } } // Tracer is the factory that creates Span instances. type Tracer struct { - newSpanFn func(ctx context.Context, spanName string, options *SpanOptions) (context.Context, Span) + attrs []Attribute + newSpanFn func(ctx context.Context, spanName string, options *SpanOptions) (context.Context, Span) + spanFromContextFn func(ctx context.Context) Span } // Start creates a new span and a context.Context that contains it. @@ -68,11 +75,37 @@ type Tracer struct { // - options contains optional values for the span, pass nil to accept any defaults func (t Tracer) Start(ctx context.Context, spanName string, options *SpanOptions) (context.Context, Span) { if t.newSpanFn != nil { - return t.newSpanFn(ctx, spanName, options) + opts := SpanOptions{} + if options != nil { + opts = *options + } + opts.Attributes = append(opts.Attributes, t.attrs...) + return t.newSpanFn(ctx, spanName, &opts) } return ctx, Span{} } +// SetAttributes sets attrs to be applied to each Span. If a key from attrs +// already exists for an attribute of the Span it will be overwritten with +// the value contained in attrs. +func (t *Tracer) SetAttributes(attrs ...Attribute) { + t.attrs = append(t.attrs, attrs...) +} + +// Enabled returns true if this Tracer is capable of creating Spans. +func (t Tracer) Enabled() bool { + return t.newSpanFn != nil +} + +// SpanFromContext returns the Span associated with the current context. +// If the provided context has no Span, false is returned. +func (t Tracer) SpanFromContext(ctx context.Context) Span { + if t.spanFromContextFn != nil { + return t.spanFromContextFn(ctx) + } + return Span{} +} + // SpanOptions contains optional settings for creating a span. type SpanOptions struct { // Kind indicates the kind of Span. @@ -97,9 +130,6 @@ type SpanImpl struct { // AddEvent contains the implementation for the Span.AddEvent method. AddEvent func(string, ...Attribute) - // AddError contains the implementation for the Span.AddError method. - AddError func(err error) - // SetStatus contains the implementation for the Span.SetStatus method. SetStatus func(SpanStatus, string) } @@ -140,13 +170,6 @@ func (s Span) AddEvent(name string, attrs ...Attribute) { } } -// AddError adds the specified error event to the span. 
-func (s Span) AddError(err error) { - if s.impl.AddError != nil { - s.impl.AddError(err) - } -} - // SetStatus sets the status on the span along with a description. func (s Span) SetStatus(code SpanStatus, desc string) { if s.impl.SetStatus != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md index cc8034cf7a8ba..71dcb5f3e9525 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md @@ -1,5 +1,86 @@ # Release History +## 1.5.1 (2024-01-17) + +### Bugs Fixed +* `InteractiveBrowserCredential` handles `AdditionallyAllowedTenants` correctly + +## 1.5.0 (2024-01-16) + +### Breaking Changes +> These changes affect only code written against a beta version such as v1.5.0-beta.1 +* Removed persistent token caching. It will return in v1.6.0-beta.1 + +### Bugs Fixed +* Credentials now preserve MSAL headers e.g. X-Client-Sku + +### Other Changes +* Upgraded dependencies + +## 1.5.0-beta.2 (2023-11-07) + +### Features Added +* `DefaultAzureCredential` and `ManagedIdentityCredential` support Azure ML managed identity +* Added spans for distributed tracing. + +## 1.5.0-beta.1 (2023-10-10) + +### Features Added +* Optional persistent token caching for most credentials. Set `TokenCachePersistenceOptions` + on a credential's options to enable and configure this. See the package documentation for + this version and [TOKEN_CACHING.md](https://aka.ms/azsdk/go/identity/caching) for more + details. +* `AzureDeveloperCLICredential` authenticates with the Azure Developer CLI (`azd`). This + credential is also part of the `DefaultAzureCredential` authentication flow. + +## 1.4.0 (2023-10-10) + +### Bugs Fixed +* `ManagedIdentityCredential` will now retry when IMDS responds 410 or 503 + +## 1.4.0-beta.5 (2023-09-12) + +### Features Added +* Service principal credentials can request CAE tokens + +### Breaking Changes +> These changes affect only code written against a beta version such as v1.4.0-beta.4 +* Whether `GetToken` requests a CAE token is now determined by `TokenRequestOptions.EnableCAE`. Azure + SDK clients which support CAE will set this option automatically. Credentials no longer request CAE + tokens by default or observe the environment variable "AZURE_IDENTITY_DISABLE_CP1". 
+ +### Bugs Fixed +* Credential chains such as `DefaultAzureCredential` now try their next credential, if any, when + managed identity authentication fails in a Docker Desktop container + ([#21417](https://github.com/Azure/azure-sdk-for-go/issues/21417)) + +## 1.4.0-beta.4 (2023-08-16) + +### Other Changes +* Upgraded dependencies + +## 1.3.1 (2023-08-16) + +### Other Changes +* Upgraded dependencies + +## 1.4.0-beta.3 (2023-08-08) + +### Bugs Fixed +* One invocation of `AzureCLICredential.GetToken()` and `OnBehalfOfCredential.GetToken()` + can no longer make two authentication attempts + +## 1.4.0-beta.2 (2023-07-14) + +### Other Changes +* `DefaultAzureCredentialOptions.TenantID` applies to workload identity authentication +* Upgraded dependencies + +## 1.4.0-beta.1 (2023-06-06) + +### Other Changes +* Re-enabled CAE support as in v1.3.0-beta.3 + ## 1.3.0 (2023-05-09) ### Breaking Changes @@ -46,14 +127,14 @@ ### Features Added * By default, credentials set client capability "CP1" to enable support for [Continuous Access Evaluation (CAE)](https://docs.microsoft.com/azure/active-directory/develop/app-resilience-continuous-access-evaluation). - This indicates to Azure Active Directory that your application can handle CAE claims challenges. + This indicates to Microsoft Entra ID that your application can handle CAE claims challenges. You can disable this behavior by setting the environment variable "AZURE_IDENTITY_DISABLE_CP1" to "true". * `InteractiveBrowserCredentialOptions.LoginHint` enables pre-populating the login prompt with a username ([#15599](https://github.com/Azure/azure-sdk-for-go/pull/15599)) * Service principal and user credentials support ADFS authentication on Azure Stack. Specify "adfs" as the credential's tenant. * Applications running in private or disconnected clouds can prevent credentials from - requesting Azure AD instance metadata by setting the `DisableInstanceDiscovery` + requesting Microsoft Entra instance metadata by setting the `DisableInstanceDiscovery` field on credential options. * Many credentials can now be configured to authenticate in multiple tenants. The options types for these credentials have an `AdditionallyAllowedTenants` field @@ -406,4 +487,4 @@ ## 0.1.0 (2020-07-23) ### Features Added -* Initial Release. Azure Identity library that provides Azure Active Directory token authentication support for the SDK. +* Initial Release. Azure Identity library that provides Microsoft Entra token authentication support for the SDK. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md index 4ac53eb7b276e..1a649202303c3 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md @@ -1,6 +1,6 @@ # Migrating from autorest/adal to azidentity -`azidentity` provides Azure Active Directory (Azure AD) authentication for the newest Azure SDK modules (`github.com/azure-sdk-for-go/sdk/...`). Older Azure SDK packages (`github.com/azure-sdk-for-go/services/...`) use types from `github.com/go-autorest/autorest/adal` instead. +`azidentity` provides Microsoft Entra ID ([formerly Azure Active Directory](https://learn.microsoft.com/azure/active-directory/fundamentals/new-name)) authentication for the newest Azure SDK modules (`github.com/azure-sdk-for-go/sdk/...`). 
Older Azure SDK packages (`github.com/azure-sdk-for-go/services/...`) use types from `github.com/go-autorest/autorest/adal` instead. This guide shows common authentication code using `autorest/adal` and its equivalent using `azidentity`. @@ -18,7 +18,7 @@ This guide shows common authentication code using `autorest/adal` and its equiva ### `autorest/adal` -Token providers require a token audience (resource identifier) and an instance of `adal.OAuthConfig`, which requires an Azure AD endpoint and tenant: +Token providers require a token audience (resource identifier) and an instance of `adal.OAuthConfig`, which requires a Microsoft Entra endpoint and tenant: ```go import "github.com/Azure/go-autorest/autorest/adal" @@ -284,7 +284,7 @@ if err == nil { } ``` -Note that `azidentity` credentials use the Azure AD v2.0 endpoint, which requires OAuth 2 scopes instead of the resource identifiers `autorest/adal` expects. For more information, see [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/v2-permissions-and-consent). +Note that `azidentity` credentials use the Microsoft Entra endpoint, which requires OAuth 2 scopes instead of the resource identifiers `autorest/adal` expects. For more information, see [Microsoft Entra ID documentation](https://learn.microsoft.com/azure/active-directory/develop/permissions-consent-overview). ## Use azidentity credentials with older packages diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md index da0baa9add3dc..b6ad2d39f8406 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md @@ -1,9 +1,9 @@ # Azure Identity Client Module for Go -The Azure Identity module provides Azure Active Directory (Azure AD) token authentication support across the Azure SDK. It includes a set of `TokenCredential` implementations, which can be used with Azure SDK clients supporting token authentication. +The Azure Identity module provides Microsoft Entra ID ([formerly Azure Active Directory](https://learn.microsoft.com/azure/active-directory/fundamentals/new-name)) token authentication support across the Azure SDK. It includes a set of `TokenCredential` implementations, which can be used with Azure SDK clients supporting token authentication. [![PkgGoDev](https://pkg.go.dev/badge/github.com/Azure/azure-sdk-for-go/sdk/azidentity)](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity) -| [Azure Active Directory documentation](https://docs.microsoft.com/azure/active-directory/) +| [Microsoft Entra ID documentation](https://learn.microsoft.com/azure/active-directory/) | [Source code](https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/azidentity) # Getting started @@ -35,6 +35,12 @@ signed in to the [Azure CLI](https://docs.microsoft.com/cli/azure). To sign in t When no default browser is available, `az login` will use the device code authentication flow. This can also be selected manually by running `az login --use-device-code`. +#### Authenticate via the Azure Developer CLI + +Developers coding outside of an IDE can also use the [Azure Developer CLI](https://aka.ms/azure-dev) to authenticate. Applications using the `DefaultAzureCredential` or the `AzureDeveloperCLICredential` can use the account logged in to the Azure Developer CLI to authenticate calls in their application when running locally. 
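+
+A minimal sketch of constructing this credential directly (assuming the `azidentity` module is already a dependency, and after logging in with `azd auth login` as described below):
+
+```go
+import "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
+
+// AzureDeveloperCLICredential obtains tokens for the account currently
+// logged in to the Azure Developer CLI; pass nil to accept default options.
+azdCred, err := azidentity.NewAzureDeveloperCLICredential(nil)
+if err != nil {
+	// handle error
+}
+// azdCred can be passed to any SDK client constructor that accepts a TokenCredential.
+```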
+ +To authenticate with the Azure Developer CLI, run `azd auth login`. On a system with a default web browser, `azd` will launch the browser to authenticate. On systems without a default web browser, run `azd auth login --use-device-code` to use the device code authentication flow. + ## Key concepts ### Credentials @@ -44,9 +50,7 @@ service client to authenticate requests. Service clients across the Azure SDK accept a credential instance when they are constructed, and use that credential to authenticate requests. -The `azidentity` module focuses on OAuth authentication with Azure Active -Directory (AAD). It offers a variety of credential types capable of acquiring -an Azure AD access token. See [Credential Types](#credential-types "Credential Types") for a list of this module's credential types. +The `azidentity` module focuses on OAuth authentication with Microsoft Entra ID. It offers a variety of credential types capable of acquiring a Microsoft Entra access token. See [Credential Types](#credential-types "Credential Types") for a list of this module's credential types. ### DefaultAzureCredential @@ -58,6 +62,7 @@ an Azure AD access token. See [Credential Types](#credential-types "Credential T 1. **Workload Identity** - If the app is deployed on Kubernetes with environment variables set by the workload identity webhook, `DefaultAzureCredential` will authenticate the configured identity. 1. **Managed Identity** - If the app is deployed to an Azure host with managed identity enabled, `DefaultAzureCredential` will authenticate with it. 1. **Azure CLI** - If a user or service principal has authenticated via the Azure CLI `az login` command, `DefaultAzureCredential` will authenticate that identity. +1. **Azure Developer CLI** - If the developer has authenticated via the Azure Developer CLI `azd auth login` command, the `DefaultAzureCredential` will authenticate with that account. > Note: `DefaultAzureCredential` is intended to simplify getting started with the SDK by handling common scenarios with reasonable default behaviors. Developers who want more control or whose scenario isn't served by the default settings should use other credential types. 
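+
+A minimal sketch of the common pattern (assuming the `azidentity` module is already a dependency): construct the credential once and hand it to any SDK client that accepts a `TokenCredential`. Authentication happens lazily, on the client's first token request.
+
+```go
+import "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
+
+// NewDefaultAzureCredential builds the chain described above; pass nil to
+// accept the default options.
+cred, err := azidentity.NewDefaultAzureCredential(nil)
+if err != nil {
+	// handle error
+}
+// cred can now be passed to any Azure SDK client constructor that accepts a TokenCredential.
+```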
@@ -152,6 +157,7 @@ client := armresources.NewResourceGroupsClient("subscription ID", chain, nil) |Credential|Usage |-|- |[AzureCLICredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#AzureCLICredential)|Authenticate as the user signed in to the Azure CLI +|[`AzureDeveloperCLICredential`](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#AzureDeveloperCLICredential)|Authenticates as the user signed in to the Azure Developer CLI ## Environment Variables @@ -161,16 +167,16 @@ client := armresources.NewResourceGroupsClient("subscription ID", chain, nil) |variable name|value |-|- -|`AZURE_CLIENT_ID`|ID of an Azure Active Directory application -|`AZURE_TENANT_ID`|ID of the application's Azure Active Directory tenant +|`AZURE_CLIENT_ID`|ID of a Microsoft Entra application +|`AZURE_TENANT_ID`|ID of the application's Microsoft Entra tenant |`AZURE_CLIENT_SECRET`|one of the application's client secrets #### Service principal with certificate |variable name|value |-|- -|`AZURE_CLIENT_ID`|ID of an Azure Active Directory application -|`AZURE_TENANT_ID`|ID of the application's Azure Active Directory tenant +|`AZURE_CLIENT_ID`|ID of a Microsoft Entra application +|`AZURE_TENANT_ID`|ID of the application's Microsoft Entra tenant |`AZURE_CLIENT_CERTIFICATE_PATH`|path to a certificate file including private key |`AZURE_CLIENT_CERTIFICATE_PASSWORD`|password of the certificate file, if any @@ -178,22 +184,30 @@ client := armresources.NewResourceGroupsClient("subscription ID", chain, nil) |variable name|value |-|- -|`AZURE_CLIENT_ID`|ID of an Azure Active Directory application +|`AZURE_CLIENT_ID`|ID of a Microsoft Entra application |`AZURE_USERNAME`|a username (usually an email address) |`AZURE_PASSWORD`|that user's password Configuration is attempted in the above order. For example, if values for a client secret and certificate are both present, the client secret will be used. +## Token caching + +Token caching is an `azidentity` feature that allows apps to: + +* Cache tokens in memory (default) or on disk (opt-in). +* Improve resilience and performance. +* Reduce the number of requests made to Microsoft Entra ID to obtain access tokens. + +For more details, see the [token caching documentation](https://aka.ms/azsdk/go/identity/caching). + ## Troubleshooting ### Error Handling Credentials return an `error` when they fail to authenticate or lack data they require to authenticate. For guidance on resolving errors from specific credential types, see the [troubleshooting guide](https://aka.ms/azsdk/go/identity/troubleshoot). -For more details on handling specific Azure Active Directory errors please refer to the -Azure Active Directory -[error code documentation](https://docs.microsoft.com/azure/active-directory/develop/reference-aadsts-error-codes). +For more details on handling specific Microsoft Entra errors, see the Microsoft Entra [error code documentation](https://learn.microsoft.com/azure/active-directory/develop/reference-error-codes). ### Logging diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD new file mode 100644 index 0000000000000..c0d6601469ccd --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD @@ -0,0 +1,70 @@ +## Token caching in the Azure Identity client module + +*Token caching* is a feature provided by the Azure Identity library that allows apps to: + +- Improve their resilience and performance. 
+- Reduce the number of requests made to Microsoft Entra ID to obtain access tokens. +- Reduce the number of times the user is prompted to authenticate. + +When an app needs to access a protected Azure resource, it typically needs to obtain an access token from Entra ID. Obtaining that token involves sending a request to Entra ID and may also involve prompting the user. Entra ID then validates the credentials provided in the request and issues an access token. + +Token caching, via the Azure Identity library, allows the app to store this access token [in memory](#in-memory-token-caching), where it's accessible to the current process, or [on disk](#persistent-token-caching) where it can be accessed across application or process invocations. The token can then be retrieved quickly and easily the next time the app needs to access the same resource. The app can avoid making another request to Entra ID, which reduces network traffic and improves resilience. Additionally, in scenarios where the app is authenticating users, token caching also avoids prompting the user each time new tokens are requested. + +### In-memory token caching + +*In-memory token caching* is the default option provided by the Azure Identity library. This caching approach allows apps to store access tokens in memory. With in-memory token caching, the library first determines if a valid access token for the requested resource is already stored in memory. If a valid token is found, it's returned to the app without the need to make another request to Entra ID. If a valid token isn't found, the library will automatically acquire a token by sending a request to Entra ID. The in-memory token cache provided by the Azure Identity library is thread-safe. + +**Note:** When Azure Identity library credentials are used with Azure service libraries (for example, Azure Blob Storage), the in-memory token caching is active in the `Pipeline` layer as well. All `TokenCredential` implementations are supported there, including custom implementations external to the Azure Identity library. + +#### Caching cannot be disabled + +As there are many levels of caching, it's not possible disable in-memory caching. However, the in-memory cache may be cleared by creating a new credential instance. + +### Persistent token caching + +> Only azidentity v1.5.0-beta versions support persistent token caching + +*Persistent disk token caching* is an opt-in feature in the Azure Identity library. The feature allows apps to cache access tokens in an encrypted, persistent storage mechanism. As indicated in the following table, the storage mechanism differs across operating systems. + +| Operating system | Storage mechanism | +|------------------|---------------------------------------| +| Linux | kernel key retention service (keyctl) | +| macOS | Keychain | +| Windows | DPAPI | + +By default the token cache will protect any data which is persisted using the user data protection APIs available on the current platform. +However, there are cases where no data protection is available, and applications may choose to allow storing the token cache in an unencrypted state by setting `TokenCachePersistenceOptions.AllowUnencryptedStorage` to `true`. This allows a credential to fall back to unencrypted storage if it can't encrypt the cache. However, we do not recommend using this storage method due to its significantly lower security measures. 
In addition, tokens are not encrypted solely to the current user, which could potentially allow unauthorized access to the cache by individuals with machine access. + +With persistent disk token caching enabled, the library first determines if a valid access token for the requested resource is already stored in the persistent cache. If a valid token is found, it's returned to the app without the need to make another request to Entra ID. Additionally, the tokens are preserved across app runs, which: + +- Makes the app more resilient to failures. +- Ensures the app can continue to function during an Entra ID outage or disruption. +- Avoids having to prompt users to authenticate each time the process is restarted. + +>IMPORTANT! The token cache contains sensitive data and **MUST** be protected to prevent compromising accounts. All application decisions regarding the persistence of the token cache must consider that a breach of its content will fully compromise all the accounts it contains. + +#### Example code + +See the [package documentation](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity@v1.5.0-beta.1#pkg-overview) for code examples demonstrating how to configure persistent caching and access cached data. + +### Credentials supporting token caching + +The following table indicates the state of in-memory and persistent caching in each credential type. + +**Note:** In-memory caching is activated by default. Persistent token caching needs to be enabled as shown in [this example](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity@v1.5.0-beta.1#example-package-PersistentCache). + +| Credential | In-memory token caching | Persistent token caching | +|--------------------------------|---------------------------------------------------------------------|--------------------------| +| `AzureCLICredential` | Not Supported | Not Supported | +| `AzureDeveloperCLICredential` | Not Supported | Not Supported | +| `ClientAssertionCredential` | Supported | Supported | +| `ClientCertificateCredential` | Supported | Supported | +| `ClientSecretCredential` | Supported | Supported | +| `DefaultAzureCredential` | Supported if the target credential in the default chain supports it | Not Supported | +| `DeviceCodeCredential` | Supported | Supported | +| `EnvironmentCredential` | Supported | Not Supported | +| `InteractiveBrowserCredential` | Supported | Supported | +| `ManagedIdentityCredential` | Supported | Not Supported | +| `OnBehalfOfCredential` | Supported | Supported | +| `UsernamePasswordCredential` | Supported | Supported | +| `WorkloadIdentityCredential` | Supported | Supported | diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md index 7b7515ebac223..832c599eb90a1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md @@ -8,7 +8,8 @@ This troubleshooting guide covers failure investigation techniques, common error - [Permission issues](#permission-issues) - [Find relevant information in errors](#find-relevant-information-in-errors) - [Enable and configure logging](#enable-and-configure-logging) -- [Troubleshoot AzureCliCredential authentication issues](#troubleshoot-azureclicredential-authentication-issues) +- [Troubleshoot AzureCLICredential authentication issues](#troubleshoot-azureclicredential-authentication-issues) +- [Troubleshoot 
AzureDeveloperCLICredential authentication issues](#troubleshoot-azuredeveloperclicredential-authentication-issues) - [Troubleshoot ClientCertificateCredential authentication issues](#troubleshoot-clientcertificatecredential-authentication-issues) - [Troubleshoot ClientSecretCredential authentication issues](#troubleshoot-clientsecretcredential-authentication-issues) - [Troubleshoot DefaultAzureCredential authentication issues](#troubleshoot-defaultazurecredential-authentication-issues) @@ -23,7 +24,7 @@ This troubleshooting guide covers failure investigation techniques, common error ## Handle azidentity errors -Any service client method that makes a request to the service may return an error due to authentication failure. This is because the credential authenticates on the first call to the service and on any subsequent call that needs to refresh an access token. Authentication errors include a description of the failure and possibly an error message from Azure Active Directory (Azure AD). Depending on the application, these errors may or may not be recoverable. +Any service client method that makes a request to the service may return an error due to authentication failure. This is because the credential authenticates on the first call to the service and on any subsequent call that needs to refresh an access token. Authentication errors include a description of the failure and possibly an error message from Microsoft Entra ID. Depending on the application, these errors may or may not be recoverable. ### Permission issues @@ -31,7 +32,7 @@ Service client errors with a status code of 401 or 403 often indicate that authe ## Find relevant information in errors -Authentication errors can include responses from Azure AD and often contain information helpful in diagnosis. Consider the following error message: +Authentication errors can include responses from Microsoft Entra ID and often contain information helpful in diagnosis. Consider the following error message: ``` ClientSecretCredential authentication failed @@ -57,9 +58,9 @@ This error contains several pieces of information: - __Failing Credential Type__: The type of credential that failed to authenticate. This can be helpful when diagnosing issues with chained credential types such as `DefaultAzureCredential` or `ChainedTokenCredential`. -- __Azure AD Error Code and Message__: The error code and message returned by Azure AD. This can give insight into the specific reason the request failed. For instance, in this case authentication failed because the provided client secret is incorrect. [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/reference-aadsts-error-codes#aadsts-error-codes) has more information on AADSTS error codes. +- __Microsoft Entra ID Error Code and Message__: The error code and message returned by Microsoft Entra ID. This can give insight into the specific reason the request failed. For instance, in this case authentication failed because the provided client secret is incorrect. [Microsoft Entra ID documentation](https://learn.microsoft.com/azure/active-directory/develop/reference-error-codes#aadsts-error-codes) has more information on AADSTS error codes. -- __Correlation ID and Timestamp__: The correlation ID and timestamp identify the request in server-side logs. This information can be useful to support engineers diagnosing unexpected Azure AD failures. +- __Correlation ID and Timestamp__: The correlation ID and timestamp identify the request in server-side logs. 
This information can be useful to support engineers diagnosing unexpected Microsoft Entra failures. ### Enable and configure logging @@ -76,12 +77,14 @@ azlog.SetListener(func(event azlog.Event, s string) { azlog.SetEvents(azidentity.EventAuthentication) ``` + ## Troubleshoot DefaultAzureCredential authentication issues | Error |Description| Mitigation | |---|---|---| |"DefaultAzureCredential failed to acquire a token"|No credential in the `DefaultAzureCredential` chain provided a token|
  • [Enable logging](#enable-and-configure-logging) to get further diagnostic information.
  • Consult the troubleshooting guide for the underlying credential types for more information.
    • [EnvironmentCredential](#troubleshoot-environmentcredential-authentication-issues)
    • [ManagedIdentityCredential](#troubleshoot-managedidentitycredential-authentication-issues)
    • [AzureCLICredential](#troubleshoot-azureclicredential-authentication-issues)
    | |Error from the client with a status code of 401 or 403|Authentication succeeded but the authorizing Azure service responded with a 401 (Unauthorized) or 403 (Forbidden) status code|
    • [Enable logging](#enable-and-configure-logging) to determine which credential in the chain returned the authenticating token.
    • If an unexpected credential is returning a token, check application configuration such as environment variables.
    • Ensure the correct role is assigned to the authenticated identity. For example, a service-specific role rather than the subscription Owner role.
    | +|"managed identity timed out"|`DefaultAzureCredential` sets a short timeout on its first managed identity authentication attempt to prevent very long timeouts during local development when no managed identity is available. That timeout causes this error in production when an application requests a token before the hosting environment is ready to provide one.|Use [ManagedIdentityCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ManagedIdentityCredential) directly, at least in production. It doesn't set a timeout on its authentication attempts.| ## Troubleshoot EnvironmentCredential authentication issues @@ -94,17 +97,17 @@ azlog.SetEvents(azidentity.EventAuthentication) | Error Code | Issue | Mitigation | |---|---|---| -|AADSTS7000215|An invalid client secret was provided.|Ensure the secret provided to the credential constructor is valid. If unsure, create a new client secret using the Azure portal. Details on creating a new client secret are in [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-2-create-a-new-application-secret).| -|AADSTS7000222|An expired client secret was provided.|Create a new client secret using the Azure portal. Details on creating a new client secret are in [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-2-create-a-new-application-secret).| -|AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. To add a new application in the desired tenant, follow the [Azure AD instructions](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal).| +|AADSTS7000215|An invalid client secret was provided.|Ensure the secret provided to the credential constructor is valid. If unsure, create a new client secret using the Azure portal. Details on creating a new client secret are in [Microsoft Entra ID documentation](https://learn.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-2-create-a-new-application-secret).| +|AADSTS7000222|An expired client secret was provided.|Create a new client secret using the Azure portal. Details on creating a new client secret are in [Microsoft Entra ID documentation](https://learn.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-2-create-a-new-application-secret).| +|AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. 
To add a new application in the desired tenant, follow the [Microsoft Entra ID instructions](https://learn.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal).| ## Troubleshoot ClientCertificateCredential authentication issues | Error Code | Description | Mitigation | |---|---|---| -|AADSTS700027|Client assertion contains an invalid signature.|Ensure the specified certificate has been uploaded to the application registration as described in [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-1-upload-a-certificate).| -|AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. To add a new application in the desired tenant, follow the [Azure AD instructions](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal).| +|AADSTS700027|Client assertion contains an invalid signature.|Ensure the specified certificate has been uploaded to the application registration as described in [Microsoft Entra ID documentation](https://learn.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-1-upload-a-certificate).| +|AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. To add a new application in the desired tenant, follow the [Microsoft Entra ID instructions](https://learn.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal).| ## Troubleshoot UsernamePasswordCredential authentication issues @@ -170,7 +173,7 @@ curl "$IDENTITY_ENDPOINT?resource=https://management.core.windows.net&api-versio |"no azure identity found for request clientID"|The application attempted to authenticate before an identity was assigned to its pod|Verify the pod is labeled correctly. This also occurs when a correctly labeled pod authenticates before the identity is ready. To prevent initialization races, configure NMI to set the Retry-After header in its responses as described in [Pod Identity documentation](https://azure.github.io/aad-pod-identity/docs/configure/feature_flags/#set-retry-after-header-in-nmi-response). -## Troubleshoot AzureCliCredential authentication issues +## Troubleshoot AzureCLICredential authentication issues | Error Message |Description| Mitigation | |---|---|---| @@ -193,6 +196,29 @@ az account get-access-token --output json --resource https://management.core.win > This command's output will contain an access token and SHOULD NOT BE SHARED, to avoid compromising account security. + +## Troubleshoot AzureDeveloperCLICredential authentication issues + +| Error Message |Description| Mitigation | +|---|---|---| +|Azure Developer CLI not found on path|The Azure Developer CLI isn't installed or couldn't be found.|
    • Ensure the Azure Developer CLI is properly installed. See the installation instructions at [Install or update the Azure Developer CLI](https://learn.microsoft.com/azure/developer/azure-developer-cli/install-azd).
    • Validate the installation location has been added to the `PATH` environment variable.
    | +|Please run "azd auth login"|No account is logged into the Azure Developer CLI, or the login has expired.|
    • Log in to the Azure Developer CLI using the `azd auth login` command.
    • Validate that the Azure Developer CLI can obtain tokens. For instructions, see [Verify the Azure Developer CLI can obtain tokens](#verify-the-azure-developer-cli-can-obtain-tokens).
    | + +#### Verify the Azure Developer CLI can obtain tokens + +You can manually verify that the Azure Developer CLI is properly authenticated and can obtain tokens. First, use the `config` command to verify the account that is currently logged in to the Azure Developer CLI. + +```sh +azd config list +``` + +Once you've verified the Azure Developer CLI is using correct account, you can validate that it's able to obtain tokens for this account. + +```sh +azd auth token --output json --scope https://management.core.windows.net/.default +``` +>Note that output of this command will contain a valid access token, and SHOULD NOT BE SHARED to avoid compromising account security. + ## Troubleshoot `WorkloadIdentityCredential` authentication issues diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json index 47e77f88e3f78..173ce2b3cdacc 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "go", "TagPrefix": "go/azidentity", - "Tag": "go/azidentity_6225ab0470" + "Tag": "go/azidentity_db4a26f583" } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/authentication_record.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/authentication_record.go new file mode 100644 index 0000000000000..ada4d6501d2c4 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/authentication_record.go @@ -0,0 +1,95 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "encoding/json" + "errors" + "fmt" + "net/url" + "strings" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/public" +) + +var supportedAuthRecordVersions = []string{"1.0"} + +// authenticationRecord is non-secret account information about an authenticated user that user credentials such as +// [DeviceCodeCredential] and [InteractiveBrowserCredential] can use to access previously cached authentication +// data. Call these credentials' Authenticate method to get an authenticationRecord for a user. +type authenticationRecord struct { + // Authority is the URL of the authority that issued the token. + Authority string `json:"authority"` + + // ClientID is the ID of the application that authenticated the user. + ClientID string `json:"clientId"` + + // HomeAccountID uniquely identifies the account. + HomeAccountID string `json:"homeAccountId"` + + // TenantID identifies the tenant in which the user authenticated. + TenantID string `json:"tenantId"` + + // Username is the user's preferred username. + Username string `json:"username"` + + // Version of the AuthenticationRecord. + Version string `json:"version"` +} + +// UnmarshalJSON implements json.Unmarshaler for AuthenticationRecord +func (a *authenticationRecord) UnmarshalJSON(b []byte) error { + // Default unmarshaling is fine but we want to return an error if the record's version isn't supported i.e., we + // want to inspect the unmarshalled values before deciding whether to return an error. Unmarshaling a formally + // different type enables this by assigning all the fields without recursing into this method. 
+ type r authenticationRecord + err := json.Unmarshal(b, (*r)(a)) + if err != nil { + return err + } + if a.Version == "" { + return errors.New("AuthenticationRecord must have a version") + } + for _, v := range supportedAuthRecordVersions { + if a.Version == v { + return nil + } + } + return fmt.Errorf("unsupported AuthenticationRecord version %q. This module supports %v", a.Version, supportedAuthRecordVersions) +} + +// account returns the AuthenticationRecord as an MSAL Account. The account is zero-valued when the AuthenticationRecord is zero-valued. +func (a *authenticationRecord) account() public.Account { + return public.Account{ + Environment: a.Authority, + HomeAccountID: a.HomeAccountID, + PreferredUsername: a.Username, + } +} + +func newAuthenticationRecord(ar public.AuthResult) (authenticationRecord, error) { + u, err := url.Parse(ar.IDToken.Issuer) + if err != nil { + return authenticationRecord{}, fmt.Errorf("Authenticate expected a URL issuer but got %q", ar.IDToken.Issuer) + } + tenant := ar.IDToken.TenantID + if tenant == "" { + tenant = strings.Trim(u.Path, "/") + } + username := ar.IDToken.PreferredUsername + if username == "" { + username = ar.IDToken.UPN + } + return authenticationRecord{ + Authority: fmt.Sprintf("%s://%s", u.Scheme, u.Host), + ClientID: ar.IDToken.Audience, + HomeAccountID: ar.Account.HomeAccountID, + TenantID: tenant, + Username: username, + Version: "1.0", + }, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go index 739ff49c1ec4f..c3bcfb56c0a9f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go @@ -10,17 +10,17 @@ import ( "bytes" "context" "errors" + "fmt" "io" "net/http" "net/url" "os" - "regexp" - "strings" "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/public" ) @@ -41,65 +41,25 @@ const ( organizationsTenantID = "organizations" developerSignOnClientID = "04b07795-8ddb-461a-bbee-02f9e1bf7b46" defaultSuffix = "/.default" - tenantIDValidationErr = "invalid tenantID. You can locate your tenantID by following the instructions listed here: https://docs.microsoft.com/partner-center/find-ids-and-domain-names" + + traceNamespace = "Microsoft.Entra" + traceOpGetToken = "GetToken" + traceOpAuthenticate = "Authenticate" ) var ( // capability CP1 indicates the client application is capable of handling CAE claims challenges - cp1 = []string{"CP1"} - // CP1 is disabled until CAE support is added back - disableCP1 = true + cp1 = []string{"CP1"} + errInvalidTenantID = errors.New("invalid tenantID. 
You can locate your tenantID by following the instructions listed here: https://learn.microsoft.com/partner-center/find-ids-and-domain-names") ) -var getConfidentialClient = func(clientID, tenantID string, cred confidential.Credential, co *azcore.ClientOptions, additionalOpts ...confidential.Option) (confidentialClient, error) { - if !validTenantID(tenantID) { - return confidential.Client{}, errors.New(tenantIDValidationErr) - } - authorityHost, err := setAuthorityHost(co.Cloud) - if err != nil { - return confidential.Client{}, err - } - authority := runtime.JoinPaths(authorityHost, tenantID) - o := []confidential.Option{ - confidential.WithAzureRegion(os.Getenv(azureRegionalAuthorityName)), - confidential.WithHTTPClient(newPipelineAdapter(co)), - } - if !disableCP1 { - o = append(o, confidential.WithClientCapabilities(cp1)) - } - o = append(o, additionalOpts...) - if strings.ToLower(tenantID) == "adfs" { - o = append(o, confidential.WithInstanceDiscovery(false)) - } - return confidential.New(authority, clientID, cred, o...) -} - -var getPublicClient = func(clientID, tenantID string, co *azcore.ClientOptions, additionalOpts ...public.Option) (public.Client, error) { - if !validTenantID(tenantID) { - return public.Client{}, errors.New(tenantIDValidationErr) - } - authorityHost, err := setAuthorityHost(co.Cloud) - if err != nil { - return public.Client{}, err - } - o := []public.Option{ - public.WithAuthority(runtime.JoinPaths(authorityHost, tenantID)), - public.WithHTTPClient(newPipelineAdapter(co)), - } - if !disableCP1 { - o = append(o, public.WithClientCapabilities(cp1)) - } - o = append(o, additionalOpts...) - if strings.ToLower(tenantID) == "adfs" { - o = append(o, public.WithInstanceDiscovery(false)) - } - return public.New(clientID, o...) -} +// tokenCachePersistenceOptions contains options for persistent token caching +type tokenCachePersistenceOptions = internal.TokenCachePersistenceOptions // setAuthorityHost initializes the authority host for credentials. Precedence is: -// 1. cloud.Configuration.ActiveDirectoryAuthorityHost value set by user -// 2. value of AZURE_AUTHORITY_HOST -// 3. default: Azure Public Cloud +// 1. cloud.Configuration.ActiveDirectoryAuthorityHost value set by user +// 2. value of AZURE_AUTHORITY_HOST +// 3. 
default: Azure Public Cloud func setAuthorityHost(cc cloud.Configuration) (string, error) { host := cc.ActiveDirectoryAuthorityHost if host == "" { @@ -121,29 +81,55 @@ func setAuthorityHost(cc cloud.Configuration) (string, error) { return host, nil } -// validTenantID return true is it receives a valid tenantID, returns false otherwise -func validTenantID(tenantID string) bool { - match, err := regexp.MatchString("^[0-9a-zA-Z-.]+$", tenantID) - if err != nil { - return false +// resolveAdditionalTenants returns a copy of tenants, simplified when tenants contains a wildcard +func resolveAdditionalTenants(tenants []string) []string { + if len(tenants) == 0 { + return nil + } + for _, t := range tenants { + // a wildcard makes all other values redundant + if t == "*" { + return []string{"*"} + } } - return match + cp := make([]string, len(tenants)) + copy(cp, tenants) + return cp } -func newPipelineAdapter(opts *azcore.ClientOptions) pipelineAdapter { - pl := runtime.NewPipeline(component, version, runtime.PipelineOptions{}, opts) - return pipelineAdapter{pl: pl} +// resolveTenant returns the correct tenant for a token request +func resolveTenant(defaultTenant, specified, credName string, additionalTenants []string) (string, error) { + if specified == "" || specified == defaultTenant { + return defaultTenant, nil + } + if defaultTenant == "adfs" { + return "", errors.New("ADFS doesn't support tenants") + } + if !validTenantID(specified) { + return "", errInvalidTenantID + } + for _, t := range additionalTenants { + if t == "*" || t == specified { + return specified, nil + } + } + return "", fmt.Errorf(`%s isn't configured to acquire tokens for tenant %q. To enable acquiring tokens for this tenant add it to the AdditionallyAllowedTenants on the credential options, or add "*" to allow acquiring tokens for any tenant`, credName, specified) } -type pipelineAdapter struct { - pl runtime.Pipeline +func alphanumeric(r rune) bool { + return ('0' <= r && r <= '9') || ('a' <= r && r <= 'z') || ('A' <= r && r <= 'Z') } -func (p pipelineAdapter) CloseIdleConnections() { - // do nothing +func validTenantID(tenantID string) bool { + for _, r := range tenantID { + if !(alphanumeric(r) || r == '.' 
|| r == '-') { + return false + } + } + return true } -func (p pipelineAdapter) Do(r *http.Request) (*http.Response, error) { +func doForClient(client *azcore.Client, r *http.Request) (*http.Response, error) { req, err := runtime.NewRequest(r.Context(), r.Method, r.URL.String()) if err != nil { return nil, err @@ -165,7 +151,18 @@ func (p pipelineAdapter) Do(r *http.Request) (*http.Response, error) { return nil, err } } - resp, err := p.pl.Do(req) + + // copy headers to the new request, ignoring any for which the new request has a value + h := req.Raw().Header + for key, vals := range r.Header { + if _, has := h[key]; !has { + for _, val := range vals { + h.Add(key, val) + } + } + } + + resp, err := client.Pipeline().Do(req) if err != nil { return nil, err } @@ -173,7 +170,7 @@ func (p pipelineAdapter) Do(r *http.Request) (*http.Response, error) { } // enables fakes for test scenarios -type confidentialClient interface { +type msalConfidentialClient interface { AcquireTokenSilent(ctx context.Context, scopes []string, options ...confidential.AcquireSilentOption) (confidential.AuthResult, error) AcquireTokenByAuthCode(ctx context.Context, code string, redirectURI string, scopes []string, options ...confidential.AcquireByAuthCodeOption) (confidential.AuthResult, error) AcquireTokenByCredential(ctx context.Context, scopes []string, options ...confidential.AcquireByCredentialOption) (confidential.AuthResult, error) @@ -181,7 +178,7 @@ type confidentialClient interface { } // enables fakes for test scenarios -type publicClient interface { +type msalPublicClient interface { AcquireTokenSilent(ctx context.Context, scopes []string, options ...public.AcquireSilentOption) (public.AuthResult, error) AcquireTokenByUsernamePassword(ctx context.Context, scopes []string, username string, password string, options ...public.AcquireByUsernamePasswordOption) (public.AuthResult, error) AcquireTokenByDeviceCode(ctx context.Context, scopes []string, options ...public.AcquireByDeviceCodeOption) (public.DeviceCode, error) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go index 33ff13c09db57..43577ab3c5f39 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go @@ -14,22 +14,19 @@ import ( "fmt" "os" "os/exec" - "regexp" "runtime" "strings" + "sync" "time" "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" ) -const ( - credNameAzureCLI = "AzureCLICredential" - timeoutCLIRequest = 10 * time.Second -) +const credNameAzureCLI = "AzureCLICredential" -// used by tests to fake invoking the CLI -type azureCLITokenProvider func(ctx context.Context, resource string, tenantID string) ([]byte, error) +type azTokenProvider func(ctx context.Context, scopes []string, tenant, subscription string) ([]byte, error) // AzureCLICredentialOptions contains optional parameters for AzureCLICredential. type AzureCLICredentialOptions struct { @@ -37,24 +34,32 @@ type AzureCLICredentialOptions struct { // to TenantID. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the // logged in account can access. AdditionallyAllowedTenants []string + + // subscription is the name or ID of a subscription. 
Set this to acquire tokens for an account other + // than the Azure CLI's current account. + subscription string + // TenantID identifies the tenant the credential should authenticate in. // Defaults to the CLI's default tenant, which is typically the home tenant of the logged in user. TenantID string - tokenProvider azureCLITokenProvider + // inDefaultChain is true when the credential is part of DefaultAzureCredential + inDefaultChain bool + // tokenProvider is used by tests to fake invoking az + tokenProvider azTokenProvider } // init returns an instance of AzureCLICredentialOptions initialized with default values. func (o *AzureCLICredentialOptions) init() { if o.tokenProvider == nil { - o.tokenProvider = defaultTokenProvider() + o.tokenProvider = defaultAzTokenProvider } } // AzureCLICredential authenticates as the identity logged in to the Azure CLI. type AzureCLICredential struct { - s *syncer - tokenProvider azureCLITokenProvider + mu *sync.Mutex + opts AzureCLICredentialOptions } // NewAzureCLICredential constructs an AzureCLICredential. Pass nil to accept default options. @@ -63,87 +68,97 @@ func NewAzureCLICredential(options *AzureCLICredentialOptions) (*AzureCLICredent if options != nil { cp = *options } + for _, r := range cp.subscription { + if !(alphanumeric(r) || r == '-' || r == '_' || r == ' ' || r == '.') { + return nil, fmt.Errorf("%s: invalid Subscription %q", credNameAzureCLI, cp.subscription) + } + } + if cp.TenantID != "" && !validTenantID(cp.TenantID) { + return nil, errInvalidTenantID + } cp.init() - c := AzureCLICredential{tokenProvider: cp.tokenProvider} - c.s = newSyncer(credNameAzureCLI, cp.TenantID, cp.AdditionallyAllowedTenants, c.requestToken, c.requestToken) - return &c, nil + cp.AdditionallyAllowedTenants = resolveAdditionalTenants(cp.AdditionallyAllowedTenants) + return &AzureCLICredential{mu: &sync.Mutex{}, opts: cp}, nil } // GetToken requests a token from the Azure CLI. This credential doesn't cache tokens, so every call invokes the CLI. // This method is called automatically by Azure SDK clients. 
func (c *AzureCLICredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + at := azcore.AccessToken{} if len(opts.Scopes) != 1 { - return azcore.AccessToken{}, errors.New(credNameAzureCLI + ": GetToken() requires exactly one scope") + return at, errors.New(credNameAzureCLI + ": GetToken() requires exactly one scope") } - // CLI expects an AAD v1 resource, not a v2 scope - opts.Scopes = []string{strings.TrimSuffix(opts.Scopes[0], defaultSuffix)} - return c.s.GetToken(ctx, opts) -} - -func (c *AzureCLICredential) requestToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - b, err := c.tokenProvider(ctx, opts.Scopes[0], opts.TenantID) + if !validScope(opts.Scopes[0]) { + return at, fmt.Errorf("%s.GetToken(): invalid scope %q", credNameAzureCLI, opts.Scopes[0]) + } + tenant, err := resolveTenant(c.opts.TenantID, opts.TenantID, credNameAzureCLI, c.opts.AdditionallyAllowedTenants) if err != nil { - return azcore.AccessToken{}, err + return at, err + } + c.mu.Lock() + defer c.mu.Unlock() + b, err := c.opts.tokenProvider(ctx, opts.Scopes, tenant, c.opts.subscription) + if err == nil { + at, err = c.createAccessToken(b) } - at, err := c.createAccessToken(b) if err != nil { - return azcore.AccessToken{}, err + err = unavailableIfInChain(err, c.opts.inDefaultChain) + return at, err } + msg := fmt.Sprintf("%s.GetToken() acquired a token for scope %q", credNameAzureCLI, strings.Join(opts.Scopes, ", ")) + log.Write(EventAuthentication, msg) return at, nil } -func defaultTokenProvider() func(ctx context.Context, resource string, tenantID string) ([]byte, error) { - return func(ctx context.Context, resource string, tenantID string) ([]byte, error) { - match, err := regexp.MatchString("^[0-9a-zA-Z-.:/]+$", resource) - if err != nil { - return nil, err - } - if !match { - return nil, fmt.Errorf(`%s: unexpected scope "%s". Only alphanumeric characters and ".", ";", "-", and "/" are allowed`, credNameAzureCLI, resource) - } - - // set a default timeout for this authentication iff the application hasn't done so already - var cancel context.CancelFunc - if _, hasDeadline := ctx.Deadline(); !hasDeadline { - ctx, cancel = context.WithTimeout(ctx, timeoutCLIRequest) - defer cancel() +// defaultAzTokenProvider invokes the Azure CLI to acquire a token. It assumes +// callers have verified that all string arguments are safe to pass to the CLI. 
+var defaultAzTokenProvider azTokenProvider = func(ctx context.Context, scopes []string, tenantID, subscription string) ([]byte, error) { + // pass the CLI a Microsoft Entra ID v1 resource because we don't know which CLI version is installed and older ones don't support v2 scopes + resource := strings.TrimSuffix(scopes[0], defaultSuffix) + // set a default timeout for this authentication iff the application hasn't done so already + var cancel context.CancelFunc + if _, hasDeadline := ctx.Deadline(); !hasDeadline { + ctx, cancel = context.WithTimeout(ctx, cliTimeout) + defer cancel() + } + commandLine := "az account get-access-token -o json --resource " + resource + if tenantID != "" { + commandLine += " --tenant " + tenantID + } + if subscription != "" { + // subscription needs quotes because it may contain spaces + commandLine += ` --subscription "` + subscription + `"` + } + var cliCmd *exec.Cmd + if runtime.GOOS == "windows" { + dir := os.Getenv("SYSTEMROOT") + if dir == "" { + return nil, newCredentialUnavailableError(credNameAzureCLI, "environment variable 'SYSTEMROOT' has no value") } + cliCmd = exec.CommandContext(ctx, "cmd.exe", "/c", commandLine) + cliCmd.Dir = dir + } else { + cliCmd = exec.CommandContext(ctx, "/bin/sh", "-c", commandLine) + cliCmd.Dir = "/bin" + } + cliCmd.Env = os.Environ() + var stderr bytes.Buffer + cliCmd.Stderr = &stderr - commandLine := "az account get-access-token -o json --resource " + resource - if tenantID != "" { - commandLine += " --tenant " + tenantID - } - var cliCmd *exec.Cmd - if runtime.GOOS == "windows" { - dir := os.Getenv("SYSTEMROOT") - if dir == "" { - return nil, newCredentialUnavailableError(credNameAzureCLI, "environment variable 'SYSTEMROOT' has no value") - } - cliCmd = exec.CommandContext(ctx, "cmd.exe", "/c", commandLine) - cliCmd.Dir = dir - } else { - cliCmd = exec.CommandContext(ctx, "/bin/sh", "-c", commandLine) - cliCmd.Dir = "/bin" + output, err := cliCmd.Output() + if err != nil { + msg := stderr.String() + var exErr *exec.ExitError + if errors.As(err, &exErr) && exErr.ExitCode() == 127 || strings.HasPrefix(msg, "'az' is not recognized") { + msg = "Azure CLI not found on path" } - cliCmd.Env = os.Environ() - var stderr bytes.Buffer - cliCmd.Stderr = &stderr - - output, err := cliCmd.Output() - if err != nil { - msg := stderr.String() - var exErr *exec.ExitError - if errors.As(err, &exErr) && exErr.ExitCode() == 127 || strings.HasPrefix(msg, "'az' is not recognized") { - msg = "Azure CLI not found on path" - } - if msg == "" { - msg = err.Error() - } - return nil, newCredentialUnavailableError(credNameAzureCLI, msg) + if msg == "" { + msg = err.Error() } - - return output, nil + return nil, newCredentialUnavailableError(credNameAzureCLI, msg) } + + return output, nil } func (c *AzureCLICredential) createAccessToken(tk []byte) (azcore.AccessToken, error) { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go new file mode 100644 index 0000000000000..cbe7c4c2db1fc --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go @@ -0,0 +1,169 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package azidentity + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "os" + "os/exec" + "runtime" + "strings" + "sync" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" +) + +const credNameAzureDeveloperCLI = "AzureDeveloperCLICredential" + +type azdTokenProvider func(ctx context.Context, scopes []string, tenant string) ([]byte, error) + +// AzureDeveloperCLICredentialOptions contains optional parameters for AzureDeveloperCLICredential. +type AzureDeveloperCLICredentialOptions struct { + // AdditionallyAllowedTenants specifies tenants for which the credential may acquire tokens, in addition + // to TenantID. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the + // logged in account can access. + AdditionallyAllowedTenants []string + + // TenantID identifies the tenant the credential should authenticate in. Defaults to the azd environment, + // which is the tenant of the selected Azure subscription. + TenantID string + + // inDefaultChain is true when the credential is part of DefaultAzureCredential + inDefaultChain bool + // tokenProvider is used by tests to fake invoking azd + tokenProvider azdTokenProvider +} + +// AzureDeveloperCLICredential authenticates as the identity logged in to the [Azure Developer CLI]. +// +// [Azure Developer CLI]: https://learn.microsoft.com/azure/developer/azure-developer-cli/overview +type AzureDeveloperCLICredential struct { + mu *sync.Mutex + opts AzureDeveloperCLICredentialOptions +} + +// NewAzureDeveloperCLICredential constructs an AzureDeveloperCLICredential. Pass nil to accept default options. +func NewAzureDeveloperCLICredential(options *AzureDeveloperCLICredentialOptions) (*AzureDeveloperCLICredential, error) { + cp := AzureDeveloperCLICredentialOptions{} + if options != nil { + cp = *options + } + if cp.TenantID != "" && !validTenantID(cp.TenantID) { + return nil, errInvalidTenantID + } + if cp.tokenProvider == nil { + cp.tokenProvider = defaultAzdTokenProvider + } + return &AzureDeveloperCLICredential{mu: &sync.Mutex{}, opts: cp}, nil +} + +// GetToken requests a token from the Azure Developer CLI. This credential doesn't cache tokens, so every call invokes azd. +// This method is called automatically by Azure SDK clients. +func (c *AzureDeveloperCLICredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + at := azcore.AccessToken{} + if len(opts.Scopes) == 0 { + return at, errors.New(credNameAzureDeveloperCLI + ": GetToken() requires at least one scope") + } + for _, scope := range opts.Scopes { + if !validScope(scope) { + return at, fmt.Errorf("%s.GetToken(): invalid scope %q", credNameAzureDeveloperCLI, scope) + } + } + tenant, err := resolveTenant(c.opts.TenantID, opts.TenantID, credNameAzureDeveloperCLI, c.opts.AdditionallyAllowedTenants) + if err != nil { + return at, err + } + c.mu.Lock() + defer c.mu.Unlock() + b, err := c.opts.tokenProvider(ctx, opts.Scopes, tenant) + if err == nil { + at, err = c.createAccessToken(b) + } + if err != nil { + err = unavailableIfInChain(err, c.opts.inDefaultChain) + return at, err + } + msg := fmt.Sprintf("%s.GetToken() acquired a token for scope %q", credNameAzureDeveloperCLI, strings.Join(opts.Scopes, ", ")) + log.Write(EventAuthentication, msg) + return at, nil +} + +// defaultAzTokenProvider invokes the Azure Developer CLI to acquire a token. 
It assumes +// callers have verified that all string arguments are safe to pass to the CLI. +var defaultAzdTokenProvider azdTokenProvider = func(ctx context.Context, scopes []string, tenant string) ([]byte, error) { + // set a default timeout for this authentication iff the application hasn't done so already + var cancel context.CancelFunc + if _, hasDeadline := ctx.Deadline(); !hasDeadline { + ctx, cancel = context.WithTimeout(ctx, cliTimeout) + defer cancel() + } + commandLine := "azd auth token -o json" + if tenant != "" { + commandLine += " --tenant-id " + tenant + } + for _, scope := range scopes { + commandLine += " --scope " + scope + } + var cliCmd *exec.Cmd + if runtime.GOOS == "windows" { + dir := os.Getenv("SYSTEMROOT") + if dir == "" { + return nil, newCredentialUnavailableError(credNameAzureDeveloperCLI, "environment variable 'SYSTEMROOT' has no value") + } + cliCmd = exec.CommandContext(ctx, "cmd.exe", "/c", commandLine) + cliCmd.Dir = dir + } else { + cliCmd = exec.CommandContext(ctx, "/bin/sh", "-c", commandLine) + cliCmd.Dir = "/bin" + } + cliCmd.Env = os.Environ() + var stderr bytes.Buffer + cliCmd.Stderr = &stderr + output, err := cliCmd.Output() + if err != nil { + msg := stderr.String() + var exErr *exec.ExitError + if errors.As(err, &exErr) && exErr.ExitCode() == 127 || strings.HasPrefix(msg, "'azd' is not recognized") { + msg = "Azure Developer CLI not found on path" + } else if strings.Contains(msg, "azd auth login") { + msg = `please run "azd auth login" from a command prompt to authenticate before using this credential` + } + if msg == "" { + msg = err.Error() + } + return nil, newCredentialUnavailableError(credNameAzureDeveloperCLI, msg) + } + return output, nil +} + +func (c *AzureDeveloperCLICredential) createAccessToken(tk []byte) (azcore.AccessToken, error) { + t := struct { + AccessToken string `json:"token"` + ExpiresOn string `json:"expiresOn"` + }{} + err := json.Unmarshal(tk, &t) + if err != nil { + return azcore.AccessToken{}, err + } + exp, err := time.Parse("2006-01-02T15:04:05Z", t.ExpiresOn) + if err != nil { + return azcore.AccessToken{}, fmt.Errorf("error parsing token expiration time %q: %v", t.ExpiresOn, err) + } + return azcore.AccessToken{ + ExpiresOn: exp.UTC(), + Token: t.AccessToken, + }, nil +} + +var _ azcore.TokenCredential = (*AzureDeveloperCLICredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml index 3b443e8eedb2e..d077682c5c2ba 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml @@ -25,23 +25,11 @@ stages: - template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml parameters: RunLiveTests: true + UsePipelineProxy: false ServiceDirectory: 'azidentity' - PreSteps: - - pwsh: | - [System.Convert]::FromBase64String($env:PFX_CONTENTS) | Set-Content -Path $(Agent.TempDirectory)/test.pfx -AsByteStream - Set-Content -Path $(Agent.TempDirectory)/test.pem -Value $env:PEM_CONTENTS - [System.Convert]::FromBase64String($env:SNI_CONTENTS) | Set-Content -Path $(Agent.TempDirectory)/testsni.pfx -AsByteStream - env: - PFX_CONTENTS: $(net-identity-spcert-pfx) - PEM_CONTENTS: $(net-identity-spcert-pem) - SNI_CONTENTS: $(net-identity-spcert-sni) - EnvVars: - AZURE_IDENTITY_TEST_TENANTID: $(net-identity-tenantid) - AZURE_IDENTITY_TEST_USERNAME: $(net-identity-username) - AZURE_IDENTITY_TEST_PASSWORD: $(net-identity-password) - IDENTITY_SP_TENANT_ID: 
$(net-identity-sp-tenantid) - IDENTITY_SP_CLIENT_ID: $(net-identity-sp-clientid) - IDENTITY_SP_CLIENT_SECRET: $(net-identity-sp-clientsecret) - IDENTITY_SP_CERT_PEM: $(Agent.TempDirectory)/test.pem - IDENTITY_SP_CERT_PFX: $(Agent.TempDirectory)/test.pfx - IDENTITY_SP_CERT_SNI: $(Agent.TempDirectory)/testsni.pfx + CloudConfig: + Public: + SubscriptionConfigurations: + - $(sub-config-azure-cloud-test-resources) + # Contains alternate tenant, AAD app and cert info for testing + - $(sub-config-identity-test-resources) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go index d9d22996cd47e..fc3df68eb196e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go @@ -12,6 +12,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" ) @@ -20,12 +21,11 @@ const credNameAssertion = "ClientAssertionCredential" // ClientAssertionCredential authenticates an application with assertions provided by a callback function. // This credential is for advanced scenarios. [ClientCertificateCredential] has a more convenient API for // the most common assertion scenario, authenticating a service principal with a certificate. See -// [Azure AD documentation] for details of the assertion format. +// [Microsoft Entra ID documentation] for details of the assertion format. // -// [Azure AD documentation]: https://docs.microsoft.com/azure/active-directory/develop/active-directory-certificate-credentials#assertion-format +// [Microsoft Entra ID documentation]: https://learn.microsoft.com/azure/active-directory/develop/active-directory-certificate-credentials#assertion-format type ClientAssertionCredential struct { - client confidentialClient - s *syncer + client *confidentialClient } // ClientAssertionCredentialOptions contains optional parameters for ClientAssertionCredential. @@ -36,11 +36,15 @@ type ClientAssertionCredentialOptions struct { // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the // application is registered. AdditionallyAllowedTenants []string + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or - // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making // the application responsible for ensuring the configured authority is valid and trustworthy. DisableInstanceDiscovery bool + + // tokenCachePersistenceOptions enables persistent token caching when not nil. + tokenCachePersistenceOptions *tokenCachePersistenceOptions } // NewClientAssertionCredential constructs a ClientAssertionCredential. The getAssertion function must be thread safe. Pass nil for options to accept defaults. 
@@ -56,28 +60,26 @@ func NewClientAssertionCredential(tenantID, clientID string, getAssertion func(c return getAssertion(ctx) }, ) - c, err := getConfidentialClient(clientID, tenantID, cred, &options.ClientOptions, confidential.WithInstanceDiscovery(!options.DisableInstanceDiscovery)) + msalOpts := confidentialClientOptions{ + AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + tokenCachePersistenceOptions: options.tokenCachePersistenceOptions, + } + c, err := newConfidentialClient(tenantID, clientID, credNameAssertion, cred, msalOpts) if err != nil { return nil, err } - cac := ClientAssertionCredential{client: c} - cac.s = newSyncer(credNameAssertion, tenantID, options.AdditionallyAllowedTenants, cac.requestToken, cac.silentAuth) - return &cac, nil + return &ClientAssertionCredential{client: c}, nil } -// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. +// GetToken requests an access token from Microsoft Entra ID. This method is called automatically by Azure SDK clients. func (c *ClientAssertionCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - return c.s.GetToken(ctx, opts) -} - -func (c *ClientAssertionCredential) silentAuth(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - ar, err := c.client.AcquireTokenSilent(ctx, opts.Scopes, confidential.WithTenantID(opts.TenantID)) - return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err -} - -func (c *ClientAssertionCredential) requestToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - ar, err := c.client.AcquireTokenByCredential(ctx, opts.Scopes, confidential.WithTenantID(opts.TenantID)) - return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameAssertion+"."+traceOpGetToken, c.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + tk, err := c.client.GetToken(ctx, opts) + return tk, err } var _ azcore.TokenCredential = (*ClientAssertionCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go index 804eba899ecfc..607533f486e13 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go @@ -15,6 +15,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" "golang.org/x/crypto/pkcs12" ) @@ -29,21 +30,25 @@ type ClientCertificateCredentialOptions struct { // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the // application is registered. AdditionallyAllowedTenants []string + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or - // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // private clouds such as Azure Stack. 
It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making // the application responsible for ensuring the configured authority is valid and trustworthy. DisableInstanceDiscovery bool + // SendCertificateChain controls whether the credential sends the public certificate chain in the x5c // header of each token request's JWT. This is required for Subject Name/Issuer (SNI) authentication. // Defaults to False. SendCertificateChain bool + + // tokenCachePersistenceOptions enables persistent token caching when not nil. + tokenCachePersistenceOptions *tokenCachePersistenceOptions } // ClientCertificateCredential authenticates a service principal with a certificate. type ClientCertificateCredential struct { - client confidentialClient - s *syncer + client *confidentialClient } // NewClientCertificateCredential constructs a ClientCertificateCredential. Pass nil for options to accept defaults. @@ -58,33 +63,27 @@ func NewClientCertificateCredential(tenantID string, clientID string, certs []*x if err != nil { return nil, err } - var o []confidential.Option - if options.SendCertificateChain { - o = append(o, confidential.WithX5C()) + msalOpts := confidentialClientOptions{ + AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + SendX5C: options.SendCertificateChain, + tokenCachePersistenceOptions: options.tokenCachePersistenceOptions, } - o = append(o, confidential.WithInstanceDiscovery(!options.DisableInstanceDiscovery)) - c, err := getConfidentialClient(clientID, tenantID, cred, &options.ClientOptions, o...) + c, err := newConfidentialClient(tenantID, clientID, credNameCert, cred, msalOpts) if err != nil { return nil, err } - cc := ClientCertificateCredential{client: c} - cc.s = newSyncer(credNameCert, tenantID, options.AdditionallyAllowedTenants, cc.requestToken, cc.silentAuth) - return &cc, nil + return &ClientCertificateCredential{client: c}, nil } -// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. +// GetToken requests an access token from Microsoft Entra ID. This method is called automatically by Azure SDK clients. func (c *ClientCertificateCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - return c.s.GetToken(ctx, opts) -} - -func (c *ClientCertificateCredential) silentAuth(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - ar, err := c.client.AcquireTokenSilent(ctx, opts.Scopes, confidential.WithTenantID(opts.TenantID)) - return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err -} - -func (c *ClientCertificateCredential) requestToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - ar, err := c.client.AcquireTokenByCredential(ctx, opts.Scopes, confidential.WithTenantID(opts.TenantID)) - return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameCert+"."+traceOpGetToken, c.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + tk, err := c.client.GetToken(ctx, opts) + return tk, err } // ParseCertificates loads certificates and a private key, in PEM or PKCS12 format, for use with NewClientCertificateCredential. 
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go index dda21f6b88d6d..9e6772e9b80a1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go @@ -11,6 +11,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" ) @@ -24,17 +25,20 @@ type ClientSecretCredentialOptions struct { // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the // application is registered. AdditionallyAllowedTenants []string + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or - // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making // the application responsible for ensuring the configured authority is valid and trustworthy. DisableInstanceDiscovery bool + + // tokenCachePersistenceOptions enables persistent token caching when not nil. + tokenCachePersistenceOptions *tokenCachePersistenceOptions } // ClientSecretCredential authenticates an application with a client secret. type ClientSecretCredential struct { - client confidentialClient - s *syncer + client *confidentialClient } // NewClientSecretCredential constructs a ClientSecretCredential. Pass nil for options to accept defaults. @@ -46,30 +50,26 @@ func NewClientSecretCredential(tenantID string, clientID string, clientSecret st if err != nil { return nil, err } - c, err := getConfidentialClient( - clientID, tenantID, cred, &options.ClientOptions, confidential.WithInstanceDiscovery(!options.DisableInstanceDiscovery), - ) + msalOpts := confidentialClientOptions{ + AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + tokenCachePersistenceOptions: options.tokenCachePersistenceOptions, + } + c, err := newConfidentialClient(tenantID, clientID, credNameSecret, cred, msalOpts) if err != nil { return nil, err } - csc := ClientSecretCredential{client: c} - csc.s = newSyncer(credNameSecret, tenantID, options.AdditionallyAllowedTenants, csc.requestToken, csc.silentAuth) - return &csc, nil + return &ClientSecretCredential{client: c}, nil } -// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. +// GetToken requests an access token from Microsoft Entra ID. This method is called automatically by Azure SDK clients. 
func (c *ClientSecretCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - return c.s.GetToken(ctx, opts) -} - -func (c *ClientSecretCredential) silentAuth(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - ar, err := c.client.AcquireTokenSilent(ctx, opts.Scopes, confidential.WithTenantID(opts.TenantID)) - return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err -} - -func (c *ClientSecretCredential) requestToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - ar, err := c.client.AcquireTokenByCredential(ctx, opts.Scopes, confidential.WithTenantID(opts.TenantID)) - return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameSecret+"."+traceOpGetToken, c.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + tk, err := c.client.GetToken(ctx, opts) + return tk, err } var _ azcore.TokenCredential = (*ClientSecretCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go new file mode 100644 index 0000000000000..854267bdbfd8e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go @@ -0,0 +1,184 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "errors" + "fmt" + "net/http" + "os" + "strings" + "sync" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" +) + +type confidentialClientOptions struct { + azcore.ClientOptions + + AdditionallyAllowedTenants []string + // Assertion for on-behalf-of authentication + Assertion string + DisableInstanceDiscovery, SendX5C bool + tokenCachePersistenceOptions *tokenCachePersistenceOptions +} + +// confidentialClient wraps the MSAL confidential client +type confidentialClient struct { + cae, noCAE msalConfidentialClient + caeMu, noCAEMu, clientMu *sync.Mutex + clientID, tenantID string + cred confidential.Credential + host string + name string + opts confidentialClientOptions + region string + azClient *azcore.Client +} + +func newConfidentialClient(tenantID, clientID, name string, cred confidential.Credential, opts confidentialClientOptions) (*confidentialClient, error) { + if !validTenantID(tenantID) { + return nil, errInvalidTenantID + } + host, err := setAuthorityHost(opts.Cloud) + if err != nil { + return nil, err + } + client, err := azcore.NewClient(module, version, runtime.PipelineOptions{ + Tracing: runtime.TracingOptions{ + Namespace: traceNamespace, + }, + }, &opts.ClientOptions) + if err != nil { + return nil, err + } + opts.AdditionallyAllowedTenants = resolveAdditionalTenants(opts.AdditionallyAllowedTenants) + return &confidentialClient{ + caeMu: &sync.Mutex{}, + clientID: clientID, + clientMu: &sync.Mutex{}, + cred: cred, + host: host, + name: name, + noCAEMu: &sync.Mutex{}, + opts: opts, + region: os.Getenv(azureRegionalAuthorityName), + tenantID: tenantID, + azClient: client, + }, 
nil +} + +// GetToken requests an access token from MSAL, checking the cache first. +func (c *confidentialClient) GetToken(ctx context.Context, tro policy.TokenRequestOptions) (azcore.AccessToken, error) { + if len(tro.Scopes) < 1 { + return azcore.AccessToken{}, fmt.Errorf("%s.GetToken() requires at least one scope", c.name) + } + // we don't resolve the tenant for managed identities because they acquire tokens only from their home tenants + if c.name != credNameManagedIdentity { + tenant, err := c.resolveTenant(tro.TenantID) + if err != nil { + return azcore.AccessToken{}, err + } + tro.TenantID = tenant + } + client, mu, err := c.client(ctx, tro) + if err != nil { + return azcore.AccessToken{}, err + } + mu.Lock() + defer mu.Unlock() + var ar confidential.AuthResult + if c.opts.Assertion != "" { + ar, err = client.AcquireTokenOnBehalfOf(ctx, c.opts.Assertion, tro.Scopes, confidential.WithClaims(tro.Claims), confidential.WithTenantID(tro.TenantID)) + } else { + ar, err = client.AcquireTokenSilent(ctx, tro.Scopes, confidential.WithClaims(tro.Claims), confidential.WithTenantID(tro.TenantID)) + if err != nil { + ar, err = client.AcquireTokenByCredential(ctx, tro.Scopes, confidential.WithClaims(tro.Claims), confidential.WithTenantID(tro.TenantID)) + } + } + if err != nil { + // We could get a credentialUnavailableError from managed identity authentication because in that case the error comes from our code. + // We return it directly because it affects the behavior of credential chains. Otherwise, we return AuthenticationFailedError. + var unavailableErr *credentialUnavailableError + if !errors.As(err, &unavailableErr) { + res := getResponseFromError(err) + err = newAuthenticationFailedError(c.name, err.Error(), res, err) + } + } else { + msg := fmt.Sprintf("%s.GetToken() acquired a token for scope %q", c.name, strings.Join(ar.GrantedScopes, ", ")) + log.Write(EventAuthentication, msg) + } + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +func (c *confidentialClient) client(ctx context.Context, tro policy.TokenRequestOptions) (msalConfidentialClient, *sync.Mutex, error) { + c.clientMu.Lock() + defer c.clientMu.Unlock() + if tro.EnableCAE { + if c.cae == nil { + client, err := c.newMSALClient(true) + if err != nil { + return nil, nil, err + } + c.cae = client + } + return c.cae, c.caeMu, nil + } + if c.noCAE == nil { + client, err := c.newMSALClient(false) + if err != nil { + return nil, nil, err + } + c.noCAE = client + } + return c.noCAE, c.noCAEMu, nil +} + +func (c *confidentialClient) newMSALClient(enableCAE bool) (msalConfidentialClient, error) { + cache, err := internal.NewCache(c.opts.tokenCachePersistenceOptions, enableCAE) + if err != nil { + return nil, err + } + authority := runtime.JoinPaths(c.host, c.tenantID) + o := []confidential.Option{ + confidential.WithAzureRegion(c.region), + confidential.WithCache(cache), + confidential.WithHTTPClient(c), + } + if enableCAE { + o = append(o, confidential.WithClientCapabilities(cp1)) + } + if c.opts.SendX5C { + o = append(o, confidential.WithX5C()) + } + if c.opts.DisableInstanceDiscovery || strings.ToLower(c.tenantID) == "adfs" { + o = append(o, confidential.WithInstanceDiscovery(false)) + } + return confidential.New(authority, c.clientID, c.cred, o...) 
+} + +// resolveTenant returns the correct WithTenantID() argument for a token request given the client's +// configuration, or an error when that configuration doesn't allow the specified tenant +func (c *confidentialClient) resolveTenant(specified string) (string, error) { + return resolveTenant(c.tenantID, specified, c.name, c.opts.AdditionallyAllowedTenants) +} + +// these methods satisfy the MSAL ops.HTTPClient interface + +func (c *confidentialClient) CloseIdleConnections() { + // do nothing +} + +func (c *confidentialClient) Do(r *http.Request) (*http.Response, error) { + return doForClient(c.azClient, r) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go index 1e3efdc97a966..35aeef8674780 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go @@ -21,6 +21,8 @@ import ( // DefaultAzureCredentialOptions contains optional parameters for DefaultAzureCredential. // These options may not apply to all credentials in the chain. type DefaultAzureCredentialOptions struct { + // ClientOptions has additional options for credentials that use an Azure SDK HTTP pipeline. These options don't apply + // to credential types that authenticate via external tools such as the Azure CLI. azcore.ClientOptions // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire tokens. Add @@ -28,12 +30,11 @@ type DefaultAzureCredentialOptions struct { // set as a semicolon delimited list of tenants in the environment variable AZURE_ADDITIONALLY_ALLOWED_TENANTS. AdditionallyAllowedTenants []string // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or - // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making // the application responsible for ensuring the configured authority is valid and trustworthy. DisableInstanceDiscovery bool - // TenantID identifies the tenant the Azure CLI should authenticate in. - // Defaults to the CLI's default tenant, which is typically the home tenant of the user logged in to the CLI. + // TenantID sets the default tenant for authentication via the Azure CLI and workload identity. TenantID string } @@ -48,6 +49,7 @@ type DefaultAzureCredentialOptions struct { // more control over its configuration. // - [ManagedIdentityCredential] // - [AzureCLICredential] +// - [AzureDeveloperCLICredential] // // Consult the documentation for these credential types for more information on how they authenticate. 
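As an aside for reviewers, a minimal consumer of the credential chain described above could look like the sketch below. It is illustrative only, relying just on the public constructor and GetToken signature visible in this diff; the ARM scope is an arbitrary example.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	// DefaultAzureCredential tries each credential in its chain until one succeeds.
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	// GetToken is normally called by Azure SDK clients; calling it directly here
	// only demonstrates the TokenRequestOptions contract.
	tk, err := cred.GetToken(context.Background(), policy.TokenRequestOptions{
		Scopes: []string{"https://management.azure.com/.default"}, // example ARM scope
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("token expires at", tk.ExpiresOn)
}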
// Once a credential has successfully authenticated, DefaultAzureCredential will use that credential for @@ -83,11 +85,11 @@ func NewDefaultAzureCredential(options *DefaultAzureCredentialOptions) (*Default creds = append(creds, &defaultCredentialErrorReporter{credType: "EnvironmentCredential", err: err}) } - // workload identity requires values for AZURE_AUTHORITY_HOST, AZURE_CLIENT_ID, AZURE_FEDERATED_TOKEN_FILE, AZURE_TENANT_ID wic, err := NewWorkloadIdentityCredential(&WorkloadIdentityCredentialOptions{ AdditionallyAllowedTenants: additionalTenants, ClientOptions: options.ClientOptions, DisableInstanceDiscovery: options.DisableInstanceDiscovery, + TenantID: options.TenantID, }) if err == nil { creds = append(creds, wic) @@ -95,6 +97,7 @@ func NewDefaultAzureCredential(options *DefaultAzureCredentialOptions) (*Default errorMessages = append(errorMessages, credNameWorkloadIdentity+": "+err.Error()) creds = append(creds, &defaultCredentialErrorReporter{credType: credNameWorkloadIdentity, err: err}) } + o := &ManagedIdentityCredentialOptions{ClientOptions: options.ClientOptions} if ID, ok := os.LookupEnv(azureClientID); ok { o.ID = ClientID(ID) @@ -115,9 +118,19 @@ func NewDefaultAzureCredential(options *DefaultAzureCredentialOptions) (*Default creds = append(creds, &defaultCredentialErrorReporter{credType: credNameAzureCLI, err: err}) } - err = defaultAzureCredentialConstructorErrorHandler(len(creds), errorMessages) - if err != nil { - return nil, err + azdCred, err := NewAzureDeveloperCLICredential(&AzureDeveloperCLICredentialOptions{ + AdditionallyAllowedTenants: additionalTenants, + TenantID: options.TenantID, + }) + if err == nil { + creds = append(creds, azdCred) + } else { + errorMessages = append(errorMessages, credNameAzureDeveloperCLI+": "+err.Error()) + creds = append(creds, &defaultCredentialErrorReporter{credType: credNameAzureDeveloperCLI, err: err}) + } + + if len(errorMessages) > 0 { + log.Writef(EventAuthentication, "NewDefaultAzureCredential failed to initialize some credentials:\n\t%s", strings.Join(errorMessages, "\n\t")) } chain, err := NewChainedTokenCredential(creds, nil) @@ -128,27 +141,13 @@ func NewDefaultAzureCredential(options *DefaultAzureCredentialOptions) (*Default return &DefaultAzureCredential{chain: chain}, nil } -// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. +// GetToken requests an access token from Microsoft Entra ID. This method is called automatically by Azure SDK clients. func (c *DefaultAzureCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { return c.chain.GetToken(ctx, opts) } var _ azcore.TokenCredential = (*DefaultAzureCredential)(nil) -func defaultAzureCredentialConstructorErrorHandler(numberOfSuccessfulCredentials int, errorMessages []string) (err error) { - errorMessage := strings.Join(errorMessages, "\n\t") - - if numberOfSuccessfulCredentials == 0 { - return errors.New(errorMessage) - } - - if len(errorMessages) != 0 { - log.Writef(EventAuthentication, "NewDefaultAzureCredential failed to initialize some credentials:\n\t%s", errorMessage) - } - - return nil -} - // defaultCredentialErrorReporter is a substitute for credentials that couldn't be constructed. // Its GetToken method always returns a credentialUnavailableError having the same message as // the error that prevented constructing the credential. 
This ensures the message is present @@ -185,7 +184,7 @@ func (w *timeoutWrapper) GetToken(ctx context.Context, opts policy.TokenRequestO defer cancel() tk, err = w.mic.GetToken(c, opts) if isAuthFailedDueToContext(err) { - err = newCredentialUnavailableError(credNameManagedIdentity, "managed identity timed out") + err = newCredentialUnavailableError(credNameManagedIdentity, "managed identity timed out. See https://aka.ms/azsdk/go/identity/troubleshoot#dac for more information") } else { // some managed identity implementation is available, so don't apply the timeout to future calls w.timeout = 0 diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/developer_credential_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/developer_credential_util.go new file mode 100644 index 0000000000000..d8b952f532eeb --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/developer_credential_util.go @@ -0,0 +1,38 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "errors" + "time" +) + +// cliTimeout is the default timeout for authentication attempts via CLI tools +const cliTimeout = 10 * time.Second + +// unavailableIfInChain returns err or, if the credential was invoked by DefaultAzureCredential, a +// credentialUnavailableError having the same message. This ensures DefaultAzureCredential will try +// the next credential in its chain (another developer credential). +func unavailableIfInChain(err error, inDefaultChain bool) error { + if err != nil && inDefaultChain { + var unavailableErr *credentialUnavailableError + if !errors.As(err, &unavailableErr) { + err = newCredentialUnavailableError(credNameAzureDeveloperCLI, err.Error()) + } + } + return err +} + +// validScope is for credentials authenticating via external tools. The authority validates scopes for all other credentials. +func validScope(scope string) bool { + for _, r := range scope { + if !(alphanumeric(r) || r == '.' || r == '-' || r == '_' || r == '/' || r == ':') { + return false + } + } + return true +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go index 108e83c43aee2..1b7a283703a06 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go @@ -12,7 +12,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/AzureAD/microsoft-authentication-library-for-go/apps/public" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" ) const credNameDeviceCode = "DeviceCodeCredential" @@ -24,19 +24,34 @@ type DeviceCodeCredentialOptions struct { // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire // tokens. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant. AdditionallyAllowedTenants []string + + // authenticationRecord returned by a call to a credential's Authenticate method. Set this option + // to enable the credential to use data from a previous authentication. + authenticationRecord authenticationRecord + // ClientID is the ID of the application users will authenticate to. // Defaults to the ID of an Azure development application. 
ClientID string + + // disableAutomaticAuthentication prevents the credential from automatically prompting the user to authenticate. + // When this option is true, [DeviceCodeCredential.GetToken] will return [ErrAuthenticationRequired] when user + // interaction is necessary to acquire a token. + disableAutomaticAuthentication bool + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or - // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making // the application responsible for ensuring the configured authority is valid and trustworthy. DisableInstanceDiscovery bool - // TenantID is the Azure Active Directory tenant the credential authenticates in. Defaults to the + + // TenantID is the Microsoft Entra tenant the credential authenticates in. Defaults to the // "organizations" tenant, which can authenticate work and school accounts. Required for single-tenant // applications. TenantID string + // tokenCachePersistenceOptions enables persistent token caching when not nil. + tokenCachePersistenceOptions *tokenCachePersistenceOptions + // UserPrompt controls how the credential presents authentication instructions. The credential calls // this function with authentication details when it receives a device code. By default, the credential // prints these details to stdout. @@ -64,20 +79,17 @@ type DeviceCodeMessage struct { UserCode string `json:"user_code"` // VerificationURL is the URL at which the user must authenticate. VerificationURL string `json:"verification_uri"` - // Message is user instruction from Azure Active Directory. + // Message is user instruction from Microsoft Entra ID. Message string `json:"message"` } // DeviceCodeCredential acquires tokens for a user via the device code flow, which has the -// user browse to an Azure Active Directory URL, enter a code, and authenticate. It's useful +// user browse to a Microsoft Entra URL, enter a code, and authenticate. It's useful // for authenticating a user in an environment without a web browser, such as an SSH session. -// If a web browser is available, InteractiveBrowserCredential is more convenient because it +// If a web browser is available, [InteractiveBrowserCredential] is more convenient because it // automatically opens a browser to the login page. type DeviceCodeCredential struct { - account public.Account - client publicClient - s *syncer - prompt func(context.Context, DeviceCodeMessage) error + client *publicClient } // NewDeviceCodeCredential creates a DeviceCodeCredential. Pass nil to accept default options. 
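For context, a caller typically wires the options above roughly as in the following sketch. This is an illustrative guess at calling code, not part of this patch; the UserPrompt callback signature is inferred from the field's doc comment, and the Microsoft Graph scope is an arbitrary example.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	// Route the device code instructions through this application's own output
	// instead of the default stdout print.
	cred, err := azidentity.NewDeviceCodeCredential(&azidentity.DeviceCodeCredentialOptions{
		UserPrompt: func(ctx context.Context, dc azidentity.DeviceCodeMessage) error {
			fmt.Printf("visit %s and enter code %s\n", dc.VerificationURL, dc.UserCode)
			return nil
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	// The first GetToken call begins the device code flow and polls until the
	// user finishes authenticating on another device.
	tk, err := cred.GetToken(context.Background(), policy.TokenRequestOptions{
		Scopes: []string{"https://graph.microsoft.com/.default"}, // example scope
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("authenticated; token expires at", tk.ExpiresOn)
}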
@@ -87,50 +99,40 @@ func NewDeviceCodeCredential(options *DeviceCodeCredentialOptions) (*DeviceCodeC cp = *options } cp.init() - c, err := getPublicClient( - cp.ClientID, cp.TenantID, &cp.ClientOptions, public.WithInstanceDiscovery(!cp.DisableInstanceDiscovery), - ) + msalOpts := publicClientOptions{ + AdditionallyAllowedTenants: cp.AdditionallyAllowedTenants, + ClientOptions: cp.ClientOptions, + DeviceCodePrompt: cp.UserPrompt, + DisableAutomaticAuthentication: cp.disableAutomaticAuthentication, + DisableInstanceDiscovery: cp.DisableInstanceDiscovery, + Record: cp.authenticationRecord, + TokenCachePersistenceOptions: cp.tokenCachePersistenceOptions, + } + c, err := newPublicClient(cp.TenantID, cp.ClientID, credNameDeviceCode, msalOpts) if err != nil { return nil, err } - cred := DeviceCodeCredential{client: c, prompt: cp.UserPrompt} - cred.s = newSyncer(credNameDeviceCode, cp.TenantID, cp.AdditionallyAllowedTenants, cred.requestToken, cred.silentAuth) - return &cred, nil + c.name = credNameDeviceCode + return &DeviceCodeCredential{client: c}, nil } -// GetToken requests an access token from Azure Active Directory. It will begin the device code flow and poll until the user completes authentication. -// This method is called automatically by Azure SDK clients. -func (c *DeviceCodeCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - return c.s.GetToken(ctx, opts) +// Authenticate a user via the device code flow. Subsequent calls to GetToken will automatically use the returned AuthenticationRecord. +func (c *DeviceCodeCredential) authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (authenticationRecord, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameDeviceCode+"."+traceOpAuthenticate, c.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + tk, err := c.client.Authenticate(ctx, opts) + return tk, err } -func (c *DeviceCodeCredential) requestToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - dc, err := c.client.AcquireTokenByDeviceCode(ctx, opts.Scopes, public.WithTenantID(opts.TenantID)) - if err != nil { - return azcore.AccessToken{}, err - } - err = c.prompt(ctx, DeviceCodeMessage{ - Message: dc.Result.Message, - UserCode: dc.Result.UserCode, - VerificationURL: dc.Result.VerificationURL, - }) - if err != nil { - return azcore.AccessToken{}, err - } - ar, err := dc.AuthenticationResult(ctx) - if err != nil { - return azcore.AccessToken{}, err - } - c.account = ar.Account - return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err -} - -func (c *DeviceCodeCredential) silentAuth(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - ar, err := c.client.AcquireTokenSilent(ctx, opts.Scopes, - public.WithSilentAccount(c.account), - public.WithTenantID(opts.TenantID), - ) - return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +// GetToken requests an access token from Microsoft Entra ID. It will begin the device code flow and poll until the user completes authentication. +// This method is called automatically by Azure SDK clients. 
+func (c *DeviceCodeCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameDeviceCode+"."+traceOpGetToken, c.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + tk, err := c.client.GetToken(ctx, opts) + return tk, err } var _ azcore.TokenCredential = (*DeviceCodeCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go index 7ecd928e02456..42f84875e23a8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go @@ -25,7 +25,7 @@ type EnvironmentCredentialOptions struct { azcore.ClientOptions // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or - // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making // the application responsible for ensuring the configured authority is valid and trustworthy. DisableInstanceDiscovery bool @@ -156,7 +156,7 @@ func NewEnvironmentCredential(options *EnvironmentCredentialOptions) (*Environme return nil, errors.New("incomplete environment variable configuration. Only AZURE_TENANT_ID and AZURE_CLIENT_ID are set") } -// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. +// GetToken requests an access token from Microsoft Entra ID. This method is called automatically by Azure SDK clients. func (c *EnvironmentCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { return c.cred.GetToken(ctx, opts) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go index 86d8976a4b228..335d2b7dcf24a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go @@ -11,13 +11,17 @@ import ( "encoding/json" "errors" "fmt" - "io" "net/http" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo" msal "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors" ) +// errAuthenticationRequired indicates a credential's Authenticate method must be called to acquire a token +// because user interaction is required and the credential is configured not to automatically prompt the user. +var errAuthenticationRequired error = &credentialUnavailableError{"can't acquire a token without user interaction. 
Call Authenticate to interactively authenticate a user"} + // getResponseFromError retrieves the response carried by // an AuthenticationFailedError or MSAL CallErr, if any func getResponseFromError(err error) *http.Response { @@ -53,21 +57,26 @@ func (e *AuthenticationFailedError) Error() string { } msg := &bytes.Buffer{} fmt.Fprintf(msg, e.credType+" authentication failed\n") - fmt.Fprintf(msg, "%s %s://%s%s\n", e.RawResponse.Request.Method, e.RawResponse.Request.URL.Scheme, e.RawResponse.Request.URL.Host, e.RawResponse.Request.URL.Path) + if e.RawResponse.Request != nil { + fmt.Fprintf(msg, "%s %s://%s%s\n", e.RawResponse.Request.Method, e.RawResponse.Request.URL.Scheme, e.RawResponse.Request.URL.Host, e.RawResponse.Request.URL.Path) + } else { + // this happens when the response is created from a custom HTTP transporter, + // which doesn't guarantee to bind the original request to the response + fmt.Fprintln(msg, "Request information not available") + } fmt.Fprintln(msg, "--------------------------------------------------------------------------------") fmt.Fprintf(msg, "RESPONSE %s\n", e.RawResponse.Status) fmt.Fprintln(msg, "--------------------------------------------------------------------------------") - body, err := io.ReadAll(e.RawResponse.Body) - e.RawResponse.Body.Close() - if err != nil { + body, err := runtime.Payload(e.RawResponse) + switch { + case err != nil: fmt.Fprintf(msg, "Error reading response body: %v", err) - } else if len(body) > 0 { - e.RawResponse.Body = io.NopCloser(bytes.NewReader(body)) + case len(body) > 0: if err := json.Indent(msg, body, "", " "); err != nil { // failed to pretty-print so just dump it verbatim fmt.Fprint(msg, string(body)) } - } else { + default: fmt.Fprint(msg, "Response contained no body") } fmt.Fprintln(msg, "\n--------------------------------------------------------------------------------") @@ -75,6 +84,8 @@ func (e *AuthenticationFailedError) Error() string { switch e.credType { case credNameAzureCLI: anchor = "azure-cli" + case credNameAzureDeveloperCLI: + anchor = "azd" case credNameCert: anchor = "client-cert" case credNameSecret: diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go index 4868d22c3e1fe..bd829698375c1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go @@ -11,7 +11,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/AzureAD/microsoft-authentication-library-for-go/apps/public" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" ) const credNameBrowser = "InteractiveBrowserCredential" @@ -23,26 +23,40 @@ type InteractiveBrowserCredentialOptions struct { // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire // tokens. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant. AdditionallyAllowedTenants []string + + // authenticationRecord returned by a call to a credential's Authenticate method. Set this option + // to enable the credential to use data from a previous authentication. + authenticationRecord authenticationRecord + // ClientID is the ID of the application users will authenticate to. // Defaults to the ID of an Azure development application. 
ClientID string + // disableAutomaticAuthentication prevents the credential from automatically prompting the user to authenticate. + // When this option is true, [InteractiveBrowserCredential.GetToken] will return [ErrAuthenticationRequired] when + // user interaction is necessary to acquire a token. + disableAutomaticAuthentication bool + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or - // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making // the application responsible for ensuring the configured authority is valid and trustworthy. DisableInstanceDiscovery bool // LoginHint pre-populates the account prompt with a username. Users may choose to authenticate a different account. LoginHint string - // RedirectURL is the URL Azure Active Directory will redirect to with the access token. This is required + + // RedirectURL is the URL Microsoft Entra ID will redirect to with the access token. This is required // only when setting ClientID, and must match a redirect URI in the application's registration. // Applications which have registered "http://localhost" as a redirect URI need not set this option. RedirectURL string - // TenantID is the Azure Active Directory tenant the credential authenticates in. Defaults to the + // TenantID is the Microsoft Entra tenant the credential authenticates in. Defaults to the // "organizations" tenant, which can authenticate work and school accounts. TenantID string + + // tokenCachePersistenceOptions enables persistent token caching when not nil. + tokenCachePersistenceOptions *tokenCachePersistenceOptions } func (o *InteractiveBrowserCredentialOptions) init() { @@ -56,10 +70,7 @@ func (o *InteractiveBrowserCredentialOptions) init() { // InteractiveBrowserCredential opens a browser to interactively authenticate a user. type InteractiveBrowserCredential struct { - account public.Account - client publicClient - options InteractiveBrowserCredentialOptions - s *syncer + client *publicClient } // NewInteractiveBrowserCredential constructs a new InteractiveBrowserCredential. Pass nil to accept default options. @@ -69,38 +80,39 @@ func NewInteractiveBrowserCredential(options *InteractiveBrowserCredentialOption cp = *options } cp.init() - c, err := getPublicClient(cp.ClientID, cp.TenantID, &cp.ClientOptions, public.WithInstanceDiscovery(!cp.DisableInstanceDiscovery)) + msalOpts := publicClientOptions{ + AdditionallyAllowedTenants: cp.AdditionallyAllowedTenants, + ClientOptions: cp.ClientOptions, + DisableAutomaticAuthentication: cp.disableAutomaticAuthentication, + DisableInstanceDiscovery: cp.DisableInstanceDiscovery, + LoginHint: cp.LoginHint, + Record: cp.authenticationRecord, + RedirectURL: cp.RedirectURL, + TokenCachePersistenceOptions: cp.tokenCachePersistenceOptions, + } + c, err := newPublicClient(cp.TenantID, cp.ClientID, credNameBrowser, msalOpts) if err != nil { return nil, err } - ibc := InteractiveBrowserCredential{client: c, options: cp} - ibc.s = newSyncer(credNameBrowser, cp.TenantID, cp.AdditionallyAllowedTenants, ibc.requestToken, ibc.silentAuth) - return &ibc, nil + return &InteractiveBrowserCredential{client: c}, nil } -// GetToken requests an access token from Azure Active Directory. 
This method is called automatically by Azure SDK clients. -func (c *InteractiveBrowserCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - return c.s.GetToken(ctx, opts) +// Authenticate a user via the default browser. Subsequent calls to GetToken will automatically use the returned AuthenticationRecord. +func (c *InteractiveBrowserCredential) authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (authenticationRecord, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameBrowser+"."+traceOpAuthenticate, c.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + tk, err := c.client.Authenticate(ctx, opts) + return tk, err } -func (c *InteractiveBrowserCredential) requestToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - ar, err := c.client.AcquireTokenInteractive(ctx, opts.Scopes, - public.WithLoginHint(c.options.LoginHint), - public.WithRedirectURI(c.options.RedirectURL), - public.WithTenantID(opts.TenantID), - ) - if err == nil { - c.account = ar.Account - } - return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err -} - -func (c *InteractiveBrowserCredential) silentAuth(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - ar, err := c.client.AcquireTokenSilent(ctx, opts.Scopes, - public.WithSilentAccount(c.account), - public.WithTenantID(opts.TenantID), - ) - return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +// GetToken requests an access token from Microsoft Entra ID. This method is called automatically by Azure SDK clients. +func (c *InteractiveBrowserCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameBrowser+"."+traceOpGetToken, c.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + tk, err := c.client.GetToken(ctx, opts) + return tk, err } var _ azcore.TokenCredential = (*InteractiveBrowserCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/exported.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/exported.go new file mode 100644 index 0000000000000..b1b4d5c8bd35c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/exported.go @@ -0,0 +1,18 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package internal + +// TokenCachePersistenceOptions contains options for persistent token caching +type TokenCachePersistenceOptions struct { + // AllowUnencryptedStorage controls whether the cache should fall back to storing its data in plain text + // when encryption isn't possible. Setting this true doesn't disable encryption. The cache always attempts + // encryption before falling back to plaintext storage. + AllowUnencryptedStorage bool + + // Name identifies the cache. Set this to isolate data from other applications. 
+ Name string +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/internal.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/internal.go new file mode 100644 index 0000000000000..c1498b464471b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/internal.go @@ -0,0 +1,31 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package internal + +import ( + "errors" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache" +) + +var errMissingImport = errors.New("import github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache to enable persistent caching") + +// NewCache constructs a persistent token cache when "o" isn't nil. Applications that intend to +// use a persistent cache must first import the cache module, which will replace this function +// with a platform-specific implementation. +var NewCache = func(o *TokenCachePersistenceOptions, enableCAE bool) (cache.ExportReplace, error) { + if o == nil { + return nil, nil + } + return nil, errMissingImport +} + +// CacheFilePath returns the path to the cache file for the given name. +// Defining it in this package makes it available to azidentity tests. +var CacheFilePath = func(name string) (string, error) { + return "", errMissingImport +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go index d7b4a32a544f2..7c25cb8bdd554 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go @@ -28,12 +28,14 @@ import ( const ( arcIMDSEndpoint = "IMDS_ENDPOINT" + defaultIdentityClientID = "DEFAULT_IDENTITY_CLIENT_ID" identityEndpoint = "IDENTITY_ENDPOINT" identityHeader = "IDENTITY_HEADER" identityServerThumbprint = "IDENTITY_SERVER_THUMBPRINT" headerMetadata = "Metadata" imdsEndpoint = "http://169.254.169.254/metadata/identity/oauth2/token" msiEndpoint = "MSI_ENDPOINT" + msiSecret = "MSI_SECRET" imdsAPIVersion = "2018-02-01" azureArcAPIVersion = "2019-08-15" serviceFabricAPIVersion = "2019-07-01-preview" @@ -47,6 +49,7 @@ type msiType int const ( msiTypeAppService msiType = iota msiTypeAzureArc + msiTypeAzureML msiTypeCloudShell msiTypeIMDS msiTypeServiceFabric @@ -55,7 +58,7 @@ const ( // managedIdentityClient provides the base for authenticating in managed identity environments // This type includes an runtime.Pipeline and TokenCredentialOptions. 
type managedIdentityClient struct { - pipeline runtime.Pipeline + azClient *azcore.Client msiType msiType endpoint string id ManagedIDKind @@ -84,13 +87,15 @@ func setIMDSRetryOptionDefaults(o *policy.RetryOptions) { } if o.StatusCodes == nil { o.StatusCodes = []int{ - // IMDS docs recommend retrying 404, 429 and all 5xx - // https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/how-to-use-vm-token#error-handling + // IMDS docs recommend retrying 404, 410, 429 and 5xx + // https://learn.microsoft.com/azure/active-directory/managed-identities-azure-resources/how-to-use-vm-token#error-handling http.StatusNotFound, // 404 + http.StatusGone, // 410 http.StatusTooManyRequests, // 429 http.StatusInternalServerError, // 500 http.StatusNotImplemented, // 501 http.StatusBadGateway, // 502 + http.StatusServiceUnavailable, // 503 http.StatusGatewayTimeout, // 504 http.StatusHTTPVersionNotSupported, // 505 http.StatusVariantAlsoNegotiates, // 506 @@ -133,13 +138,27 @@ func newManagedIdentityClient(options *ManagedIdentityCredentialOptions) (*manag c.msiType = msiTypeAzureArc } } else if endpoint, ok := os.LookupEnv(msiEndpoint); ok { - env = "Cloud Shell" c.endpoint = endpoint - c.msiType = msiTypeCloudShell + if _, ok := os.LookupEnv(msiSecret); ok { + env = "Azure ML" + c.msiType = msiTypeAzureML + } else { + env = "Cloud Shell" + c.msiType = msiTypeCloudShell + } } else { setIMDSRetryOptionDefaults(&cp.Retry) } - c.pipeline = runtime.NewPipeline(component, version, runtime.PipelineOptions{}, &cp) + + client, err := azcore.NewClient(module, version, runtime.PipelineOptions{ + Tracing: runtime.TracingOptions{ + Namespace: traceNamespace, + }, + }, &cp) + if err != nil { + return nil, err + } + c.azClient = client if log.Should(EventAuthentication) { log.Writef(EventAuthentication, "Managed Identity Credential will use %s managed identity", env) @@ -166,7 +185,7 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi return azcore.AccessToken{}, err } - resp, err := c.pipeline.Do(msg) + resp, err := c.azClient.Pipeline().Do(msg) if err != nil { return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, err.Error(), nil, err) } @@ -175,11 +194,25 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi return c.createAccessToken(resp) } - if c.msiType == msiTypeIMDS && resp.StatusCode == 400 { - if id != nil { - return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "the requested identity isn't assigned to this resource", resp, nil) + if c.msiType == msiTypeIMDS { + switch resp.StatusCode { + case http.StatusBadRequest: + if id != nil { + return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "the requested identity isn't assigned to this resource", resp, nil) + } + msg := "failed to authenticate a system assigned identity" + if body, err := runtime.Payload(resp); err == nil && len(body) > 0 { + msg += fmt.Sprintf(". The endpoint responded with %s", body) + } + return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, msg) + case http.StatusForbidden: + // Docker Desktop runs a proxy that responds 403 to IMDS token requests. 
If we get that response, + // we return credentialUnavailableError so credential chains continue to their next credential + body, err := runtime.Payload(resp) + if err == nil && strings.Contains(string(body), "A socket operation was attempted to an unreachable network") { + return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, fmt.Sprintf("unexpected response %q", string(body))) + } } - return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, "no default identity is assigned to this resource") } return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "authentication failed", resp, nil) @@ -231,6 +264,8 @@ func (c *managedIdentityClient) createAuthRequest(ctx context.Context, id Manage return nil, newAuthenticationFailedError(credNameManagedIdentity, msg, nil, err) } return c.createAzureArcAuthRequest(ctx, id, scopes, key) + case msiTypeAzureML: + return c.createAzureMLAuthRequest(ctx, id, scopes) case msiTypeServiceFabric: return c.createServiceFabricAuthRequest(ctx, id, scopes) case msiTypeCloudShell: @@ -280,6 +315,29 @@ func (c *managedIdentityClient) createAppServiceAuthRequest(ctx context.Context, return request, nil } +func (c *managedIdentityClient) createAzureMLAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { + request, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint) + if err != nil { + return nil, err + } + request.Raw().Header.Set("secret", os.Getenv(msiSecret)) + q := request.Raw().URL.Query() + q.Add("api-version", "2017-09-01") + q.Add("resource", strings.Join(scopes, " ")) + q.Add("clientid", os.Getenv(defaultIdentityClientID)) + if id != nil { + if id.idKind() == miResourceID { + log.Write(EventAuthentication, "WARNING: Azure ML doesn't support specifying a managed identity by resource ID") + q.Set("clientid", "") + q.Set(qpResID, id.String()) + } else { + q.Set("clientid", id.String()) + } + } + request.Raw().URL.RawQuery = q.Encode() + return request, nil +} + func (c *managedIdentityClient) createServiceFabricAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { request, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint) if err != nil { @@ -314,7 +372,7 @@ func (c *managedIdentityClient) getAzureArcSecretKey(ctx context.Context, resour q.Add("resource", strings.Join(resources, " ")) request.Raw().URL.RawQuery = q.Encode() // send the initial request to get the short-lived secret key - response, err := c.pipeline.Do(request) + response, err := c.azClient.Pipeline().Do(request) if err != nil { return "", err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go index c6710ae52aefe..dcd278befa168 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go @@ -8,12 +8,12 @@ package azidentity import ( "context" - "errors" "fmt" "strings" "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" ) @@ -68,12 +68,11 @@ type ManagedIdentityCredentialOptions struct { // ManagedIdentityCredential authenticates an Azure managed identity in any hosting 
environment supporting managed identities. // This credential authenticates a system-assigned identity by default. Use ManagedIdentityCredentialOptions.ID to specify a -// user-assigned identity. See Azure Active Directory documentation for more information about managed identities: -// https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/overview +// user-assigned identity. See Microsoft Entra ID documentation for more information about managed identities: +// https://learn.microsoft.com/azure/active-directory/managed-identities-azure-resources/overview type ManagedIdentityCredential struct { - client confidentialClient + client *confidentialClient mic *managedIdentityClient - s *syncer } // NewManagedIdentityCredential creates a ManagedIdentityCredential. Pass nil to accept default options. @@ -93,35 +92,30 @@ func NewManagedIdentityCredential(options *ManagedIdentityCredentialOptions) (*M if options.ID != nil { clientID = options.ID.String() } - // similarly, it's okay to give MSAL an incorrect authority URL because that URL won't be used - c, err := confidential.New("https://login.microsoftonline.com/common", clientID, cred) + // similarly, it's okay to give MSAL an incorrect tenant because MSAL won't use the value + c, err := newConfidentialClient("common", clientID, credNameManagedIdentity, cred, confidentialClientOptions{ + ClientOptions: options.ClientOptions, + }) if err != nil { return nil, err } - m := ManagedIdentityCredential{client: c, mic: mic} - m.s = newSyncer(credNameManagedIdentity, "", nil, m.requestToken, m.silentAuth) - return &m, nil + return &ManagedIdentityCredential{client: c, mic: mic}, nil } // GetToken requests an access token from the hosting environment. This method is called automatically by Azure SDK clients. func (c *ManagedIdentityCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameManagedIdentity+"."+traceOpGetToken, c.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + if len(opts.Scopes) != 1 { - err := errors.New(credNameManagedIdentity + ": GetToken() requires exactly one scope") + err = fmt.Errorf("%s.GetToken() requires exactly one scope", credNameManagedIdentity) return azcore.AccessToken{}, err } - // managed identity endpoints require an AADv1 resource (i.e. token audience), not a v2 scope, so we remove "/.default" here + // managed identity endpoints require a Microsoft Entra ID v1 resource (i.e. 
token audience), not a v2 scope, so we remove "/.default" here opts.Scopes = []string{strings.TrimSuffix(opts.Scopes[0], defaultSuffix)} - return c.s.GetToken(ctx, opts) -} - -func (c *ManagedIdentityCredential) requestToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - ar, err := c.client.AcquireTokenByCredential(ctx, opts.Scopes) - return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err -} - -func (c *ManagedIdentityCredential) silentAuth(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - ar, err := c.client.AcquireTokenSilent(ctx, opts.Scopes) - return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err + tk, err := c.client.GetToken(ctx, opts) + return tk, err } var _ azcore.TokenCredential = (*ManagedIdentityCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/on_behalf_of_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/on_behalf_of_credential.go index 3e173f47d26d6..5e67cf02145d7 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/on_behalf_of_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/on_behalf_of_credential.go @@ -13,6 +13,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" ) @@ -21,13 +22,11 @@ const credNameOBO = "OnBehalfOfCredential" // OnBehalfOfCredential authenticates a service principal via the on-behalf-of flow. This is typically used by // middle-tier services that authorize requests to other services with a delegated user identity. Because this // is not an interactive authentication flow, an application using it must have admin consent for any delegated -// permissions before requesting tokens for them. See [Azure Active Directory documentation] for more details. +// permissions before requesting tokens for them. See [Microsoft Entra ID documentation] for more details. // -// [Azure Active Directory documentation]: https://docs.microsoft.com/azure/active-directory/develop/v2-oauth2-on-behalf-of-flow +// [Microsoft Entra ID documentation]: https://learn.microsoft.com/azure/active-directory/develop/v2-oauth2-on-behalf-of-flow type OnBehalfOfCredential struct { - assertion string - client confidentialClient - s *syncer + client *confidentialClient } // OnBehalfOfCredentialOptions contains optional parameters for OnBehalfOfCredential @@ -38,11 +37,13 @@ type OnBehalfOfCredentialOptions struct { // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the // application is registered. AdditionallyAllowedTenants []string + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or - // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making // the application responsible for ensuring the configured authority is valid and trustworthy. DisableInstanceDiscovery bool + // SendCertificateChain applies only when the credential is configured to authenticate with a certificate. 
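For context, a middle-tier service would typically construct this credential roughly as sketched below. The NewOnBehalfOfCredentialWithSecret constructor and the placeholder values are assumptions for illustration; this hunk only shows the package-internal constructor.

package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	// userAssertion is the access token this middle-tier service received from its caller.
	userAssertion := "<incoming user access token>" // placeholder
	// Assumed public constructor; a certificate-based variant also exists upstream.
	cred, err := azidentity.NewOnBehalfOfCredentialWithSecret(
		"<tenant ID>", "<client ID>", userAssertion, "<client secret>", nil)
	if err != nil {
		log.Fatal(err)
	}
	// Exchange the user assertion for a token to a downstream API.
	_, err = cred.GetToken(context.Background(), policy.TokenRequestOptions{
		Scopes: []string{"https://graph.microsoft.com/.default"}, // example downstream scope
	})
	if err != nil {
		log.Fatal(err)
	}
}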
// This setting controls whether the credential sends the public certificate chain in the x5c header of each // token request's JWT. This is required for, and only used in, Subject Name/Issuer (SNI) authentication. @@ -72,28 +73,27 @@ func newOnBehalfOfCredential(tenantID, clientID, userAssertion string, cred conf if options == nil { options = &OnBehalfOfCredentialOptions{} } - opts := []confidential.Option{} - if options.SendCertificateChain { - opts = append(opts, confidential.WithX5C()) + opts := confidentialClientOptions{ + AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, + Assertion: userAssertion, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + SendX5C: options.SendCertificateChain, } - opts = append(opts, confidential.WithInstanceDiscovery(!options.DisableInstanceDiscovery)) - c, err := getConfidentialClient(clientID, tenantID, cred, &options.ClientOptions, opts...) + c, err := newConfidentialClient(tenantID, clientID, credNameOBO, cred, opts) if err != nil { return nil, err } - obo := OnBehalfOfCredential{assertion: userAssertion, client: c} - obo.s = newSyncer(credNameOBO, tenantID, options.AdditionallyAllowedTenants, obo.requestToken, obo.requestToken) - return &obo, nil + return &OnBehalfOfCredential{c}, nil } -// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. +// GetToken requests an access token from Microsoft Entra ID. This method is called automatically by Azure SDK clients. func (o *OnBehalfOfCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - return o.s.GetToken(ctx, opts) -} - -func (o *OnBehalfOfCredential) requestToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - ar, err := o.client.AcquireTokenOnBehalfOf(ctx, o.assertion, opts.Scopes, confidential.WithTenantID(opts.TenantID)) - return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameOBO+"."+traceOpGetToken, o.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + tk, err := o.client.GetToken(ctx, opts) + return tk, err } var _ azcore.TokenCredential = (*OnBehalfOfCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go new file mode 100644 index 0000000000000..63c31190d188b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go @@ -0,0 +1,273 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package azidentity + +import ( + "context" + "errors" + "fmt" + "net/http" + "strings" + "sync" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/public" + + // this import ensures well-known configurations in azcore/cloud have ARM audiences for Authenticate() + _ "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime" +) + +type publicClientOptions struct { + azcore.ClientOptions + + AdditionallyAllowedTenants []string + DeviceCodePrompt func(context.Context, DeviceCodeMessage) error + DisableAutomaticAuthentication bool + DisableInstanceDiscovery bool + LoginHint, RedirectURL string + Record authenticationRecord + TokenCachePersistenceOptions *tokenCachePersistenceOptions + Username, Password string +} + +// publicClient wraps the MSAL public client +type publicClient struct { + cae, noCAE msalPublicClient + caeMu, noCAEMu, clientMu *sync.Mutex + clientID, tenantID string + defaultScope []string + host string + name string + opts publicClientOptions + record authenticationRecord + azClient *azcore.Client +} + +var errScopeRequired = errors.New("authenticating in this environment requires specifying a scope in TokenRequestOptions") + +func newPublicClient(tenantID, clientID, name string, o publicClientOptions) (*publicClient, error) { + if !validTenantID(tenantID) { + return nil, errInvalidTenantID + } + host, err := setAuthorityHost(o.Cloud) + if err != nil { + return nil, err + } + // if the application specified a cloud configuration, use its ARM audience as the default scope for Authenticate() + audience := o.Cloud.Services[cloud.ResourceManager].Audience + if audience == "" { + // no cloud configuration, or no ARM audience, specified; try to map the host to a well-known one (all of which have a trailing slash) + if !strings.HasSuffix(host, "/") { + host += "/" + } + switch host { + case cloud.AzureChina.ActiveDirectoryAuthorityHost: + audience = cloud.AzureChina.Services[cloud.ResourceManager].Audience + case cloud.AzureGovernment.ActiveDirectoryAuthorityHost: + audience = cloud.AzureGovernment.Services[cloud.ResourceManager].Audience + case cloud.AzurePublic.ActiveDirectoryAuthorityHost: + audience = cloud.AzurePublic.Services[cloud.ResourceManager].Audience + } + } + // if we didn't come up with an audience, the application will have to specify a scope for Authenticate() + var defaultScope []string + if audience != "" { + defaultScope = []string{audience + defaultSuffix} + } + client, err := azcore.NewClient(module, version, runtime.PipelineOptions{ + Tracing: runtime.TracingOptions{ + Namespace: traceNamespace, + }, + }, &o.ClientOptions) + if err != nil { + return nil, err + } + o.AdditionallyAllowedTenants = resolveAdditionalTenants(o.AdditionallyAllowedTenants) + return &publicClient{ + caeMu: &sync.Mutex{}, + clientID: clientID, + clientMu: &sync.Mutex{}, + defaultScope: defaultScope, + host: host, + name: name, + noCAEMu: &sync.Mutex{}, + opts: o, + record: o.Record, + tenantID: tenantID, + azClient: client, + }, nil +} + +func (p *publicClient) Authenticate(ctx context.Context, tro *policy.TokenRequestOptions) (authenticationRecord, error) { + if tro == nil { + tro = &policy.TokenRequestOptions{} + } + if len(tro.Scopes) 
== 0 { + if p.defaultScope == nil { + return authenticationRecord{}, errScopeRequired + } + tro.Scopes = p.defaultScope + } + client, mu, err := p.client(*tro) + if err != nil { + return authenticationRecord{}, err + } + mu.Lock() + defer mu.Unlock() + _, err = p.reqToken(ctx, client, *tro) + if err == nil { + scope := strings.Join(tro.Scopes, ", ") + msg := fmt.Sprintf("%s.Authenticate() acquired a token for scope %q", p.name, scope) + log.Write(EventAuthentication, msg) + } + return p.record, err +} + +// GetToken requests an access token from MSAL, checking the cache first. +func (p *publicClient) GetToken(ctx context.Context, tro policy.TokenRequestOptions) (azcore.AccessToken, error) { + if len(tro.Scopes) < 1 { + return azcore.AccessToken{}, fmt.Errorf("%s.GetToken() requires at least one scope", p.name) + } + tenant, err := p.resolveTenant(tro.TenantID) + if err != nil { + return azcore.AccessToken{}, err + } + client, mu, err := p.client(tro) + if err != nil { + return azcore.AccessToken{}, err + } + mu.Lock() + defer mu.Unlock() + ar, err := client.AcquireTokenSilent(ctx, tro.Scopes, public.WithSilentAccount(p.record.account()), public.WithClaims(tro.Claims), public.WithTenantID(tenant)) + if err == nil { + return p.token(ar, err) + } + if p.opts.DisableAutomaticAuthentication { + return azcore.AccessToken{}, errAuthenticationRequired + } + at, err := p.reqToken(ctx, client, tro) + if err == nil { + msg := fmt.Sprintf("%s.GetToken() acquired a token for scope %q", p.name, strings.Join(ar.GrantedScopes, ", ")) + log.Write(EventAuthentication, msg) + } + return at, err +} + +// reqToken requests a token from the MSAL public client. It's separate from GetToken() to enable Authenticate() to bypass the cache. +func (p *publicClient) reqToken(ctx context.Context, c msalPublicClient, tro policy.TokenRequestOptions) (azcore.AccessToken, error) { + tenant, err := p.resolveTenant(tro.TenantID) + if err != nil { + return azcore.AccessToken{}, err + } + var ar public.AuthResult + switch p.name { + case credNameBrowser: + ar, err = c.AcquireTokenInteractive(ctx, tro.Scopes, + public.WithClaims(tro.Claims), + public.WithLoginHint(p.opts.LoginHint), + public.WithRedirectURI(p.opts.RedirectURL), + public.WithTenantID(tenant), + ) + case credNameDeviceCode: + dc, e := c.AcquireTokenByDeviceCode(ctx, tro.Scopes, public.WithClaims(tro.Claims), public.WithTenantID(tenant)) + if e != nil { + return azcore.AccessToken{}, e + } + err = p.opts.DeviceCodePrompt(ctx, DeviceCodeMessage{ + Message: dc.Result.Message, + UserCode: dc.Result.UserCode, + VerificationURL: dc.Result.VerificationURL, + }) + if err == nil { + ar, err = dc.AuthenticationResult(ctx) + } + case credNameUserPassword: + ar, err = c.AcquireTokenByUsernamePassword(ctx, tro.Scopes, p.opts.Username, p.opts.Password, public.WithClaims(tro.Claims), public.WithTenantID(tenant)) + default: + return azcore.AccessToken{}, fmt.Errorf("unknown credential %q", p.name) + } + return p.token(ar, err) +} + +func (p *publicClient) client(tro policy.TokenRequestOptions) (msalPublicClient, *sync.Mutex, error) { + p.clientMu.Lock() + defer p.clientMu.Unlock() + if tro.EnableCAE { + if p.cae == nil { + client, err := p.newMSALClient(true) + if err != nil { + return nil, nil, err + } + p.cae = client + } + return p.cae, p.caeMu, nil + } + if p.noCAE == nil { + client, err := p.newMSALClient(false) + if err != nil { + return nil, nil, err + } + p.noCAE = client + } + return p.noCAE, p.noCAEMu, nil +} + +func (p *publicClient) newMSALClient(enableCAE bool) 
(msalPublicClient, error) { + cache, err := internal.NewCache(p.opts.TokenCachePersistenceOptions, enableCAE) + if err != nil { + return nil, err + } + o := []public.Option{ + public.WithAuthority(runtime.JoinPaths(p.host, p.tenantID)), + public.WithCache(cache), + public.WithHTTPClient(p), + } + if enableCAE { + o = append(o, public.WithClientCapabilities(cp1)) + } + if p.opts.DisableInstanceDiscovery || strings.ToLower(p.tenantID) == "adfs" { + o = append(o, public.WithInstanceDiscovery(false)) + } + return public.New(p.clientID, o...) +} + +func (p *publicClient) token(ar public.AuthResult, err error) (azcore.AccessToken, error) { + if err == nil { + p.record, err = newAuthenticationRecord(ar) + } else { + res := getResponseFromError(err) + err = newAuthenticationFailedError(p.name, err.Error(), res, err) + } + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +// resolveTenant returns the correct WithTenantID() argument for a token request given the client's +// configuration, or an error when that configuration doesn't allow the specified tenant +func (p *publicClient) resolveTenant(specified string) (string, error) { + t, err := resolveTenant(p.tenantID, specified, p.name, p.opts.AdditionallyAllowedTenants) + if t == p.tenantID { + // callers pass this value to MSAL's WithTenantID(). There's no need to redundantly specify + // the client's default tenant and doing so is an error when that tenant is "organizations" + t = "" + } + return t, err +} + +// these methods satisfy the MSAL ops.HTTPClient interface + +func (p *publicClient) CloseIdleConnections() { + // do nothing +} + +func (p *publicClient) Do(r *http.Request) (*http.Response, error) { + return doForClient(p.azClient, r) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/syncer.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/syncer.go deleted file mode 100644 index ae38555994b08..0000000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/syncer.go +++ /dev/null @@ -1,130 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -package azidentity - -import ( - "context" - "errors" - "fmt" - "strings" - "sync" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/internal/log" -) - -type authFn func(context.Context, policy.TokenRequestOptions) (azcore.AccessToken, error) - -// syncer synchronizes authentication calls so that goroutines can share a credential instance -type syncer struct { - addlTenants []string - authing bool - cond *sync.Cond - reqToken, silent authFn - name, tenant string -} - -func newSyncer(name, tenant string, additionalTenants []string, reqToken, silentAuth authFn) *syncer { - return &syncer{ - addlTenants: resolveAdditionalTenants(additionalTenants), - cond: &sync.Cond{L: &sync.Mutex{}}, - name: name, - reqToken: reqToken, - silent: silentAuth, - tenant: tenant, - } -} - -// GetToken ensures that only one goroutine authenticates at a time -func (s *syncer) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - var at azcore.AccessToken - var err error - if len(opts.Scopes) == 0 { - return at, errors.New(s.name + ".GetToken() requires at least one scope") - } - // we don't resolve the tenant for managed identities because they can acquire tokens only from their home tenants - if s.name != credNameManagedIdentity { - tenant, err := s.resolveTenant(opts.TenantID) - if err != nil { - return at, err - } - opts.TenantID = tenant - } - auth := false - s.cond.L.Lock() - defer s.cond.L.Unlock() - for { - at, err = s.silent(ctx, opts) - if err == nil { - // got a token - break - } - if !s.authing { - // this goroutine will request a token - s.authing, auth = true, true - break - } - // another goroutine is acquiring a token; wait for it to finish, then try silent auth again - s.cond.Wait() - } - if auth { - s.authing = false - at, err = s.reqToken(ctx, opts) - s.cond.Broadcast() - } - if err != nil { - // Return credentialUnavailableError directly because that type affects the behavior of credential chains. - // Otherwise, return AuthenticationFailedError. - var unavailableErr *credentialUnavailableError - if !errors.As(err, &unavailableErr) { - res := getResponseFromError(err) - err = newAuthenticationFailedError(s.name, err.Error(), res, err) - } - } else if log.Should(EventAuthentication) { - scope := strings.Join(opts.Scopes, ", ") - msg := fmt.Sprintf(`%s.GetToken() acquired a token for scope "%s"\n`, s.name, scope) - log.Write(EventAuthentication, msg) - } - return at, err -} - -// resolveTenant returns the correct tenant for a token request given the credential's -// configuration, or an error when the specified tenant isn't allowed by that configuration -func (s *syncer) resolveTenant(requested string) (string, error) { - if requested == "" || requested == s.tenant { - return s.tenant, nil - } - if s.tenant == "adfs" { - return "", errors.New("ADFS doesn't support tenants") - } - if !validTenantID(requested) { - return "", errors.New(tenantIDValidationErr) - } - for _, t := range s.addlTenants { - if t == "*" || t == requested { - return requested, nil - } - } - return "", fmt.Errorf(`%s isn't configured to acquire tokens for tenant %q. 
To enable acquiring tokens for this tenant add it to the AdditionallyAllowedTenants on the credential options, or add "*" to allow acquiring tokens for any tenant`, s.name, requested) -} - -// resolveAdditionalTenants returns a copy of tenants, simplified when tenants contains a wildcard -func resolveAdditionalTenants(tenants []string) []string { - if len(tenants) == 0 { - return nil - } - for _, t := range tenants { - // a wildcard makes all other values redundant - if t == "*" { - return []string{"*"} - } - } - cp := make([]string, len(tenants)) - copy(cp, tenants) - return cp -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-pre.ps1 b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-pre.ps1 new file mode 100644 index 0000000000000..fe0183addeb54 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-pre.ps1 @@ -0,0 +1,36 @@ +[CmdletBinding(SupportsShouldProcess = $true, ConfirmImpact = 'Medium')] +param ( + # Captures any arguments from eng/New-TestResources.ps1 not declared here (no parameter errors). + [Parameter(ValueFromRemainingArguments = $true)] + $RemainingArguments +) + +if (!$CI) { + # TODO: Remove this once auto-cloud config downloads are supported locally + Write-Host "Skipping cert setup in local testing mode" + return +} + +if ($EnvironmentVariables -eq $null -or $EnvironmentVariables.Count -eq 0) { + throw "EnvironmentVariables must be set in the calling script New-TestResources.ps1" +} + +$tmp = $env:TEMP ? $env:TEMP : [System.IO.Path]::GetTempPath() +$pfxPath = Join-Path $tmp "test.pfx" +$pemPath = Join-Path $tmp "test.pem" +$sniPath = Join-Path $tmp "testsni.pfx" + +Write-Host "Creating identity test files: $pfxPath $pemPath $sniPath" + +[System.Convert]::FromBase64String($EnvironmentVariables['PFX_CONTENTS']) | Set-Content -Path $pfxPath -AsByteStream +Set-Content -Path $pemPath -Value $EnvironmentVariables['PEM_CONTENTS'] +[System.Convert]::FromBase64String($EnvironmentVariables['SNI_CONTENTS']) | Set-Content -Path $sniPath -AsByteStream + +# Set for pipeline +Write-Host "##vso[task.setvariable variable=IDENTITY_SP_CERT_PFX;]$pfxPath" +Write-Host "##vso[task.setvariable variable=IDENTITY_SP_CERT_PEM;]$pemPath" +Write-Host "##vso[task.setvariable variable=IDENTITY_SP_CERT_SNI;]$sniPath" +# Set for local +$env:IDENTITY_SP_CERT_PFX = $pfxPath +$env:IDENTITY_SP_CERT_PEM = $pemPath +$env:IDENTITY_SP_CERT_SNI = $sniPath diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources.bicep b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources.bicep new file mode 100644 index 0000000000000..b3490d3b50afd --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources.bicep @@ -0,0 +1 @@ +param baseName string diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go index 8e652e33ff6f1..294ed81e951cd 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go @@ -11,7 +11,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/AzureAD/microsoft-authentication-library-for-go/apps/public" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" ) const credNameUserPassword = 
"UsernamePasswordCredential" @@ -24,11 +24,19 @@ type UsernamePasswordCredentialOptions struct { // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the // application is registered. AdditionallyAllowedTenants []string + + // authenticationRecord returned by a call to a credential's Authenticate method. Set this option + // to enable the credential to use data from a previous authentication. + authenticationRecord authenticationRecord + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or - // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making // the application responsible for ensuring the configured authority is valid and trustworthy. DisableInstanceDiscovery bool + + // tokenCachePersistenceOptions enables persistent token caching when not nil. + tokenCachePersistenceOptions *tokenCachePersistenceOptions } // UsernamePasswordCredential authenticates a user with a password. Microsoft doesn't recommend this kind of authentication, @@ -36,10 +44,7 @@ type UsernamePasswordCredentialOptions struct { // with any form of multi-factor authentication, and the application must already have user or admin consent. // This credential can only authenticate work and school accounts; it can't authenticate Microsoft accounts. type UsernamePasswordCredential struct { - account public.Account - client publicClient - password, username string - s *syncer + client *publicClient } // NewUsernamePasswordCredential creates a UsernamePasswordCredential. clientID is the ID of the application the user @@ -48,34 +53,38 @@ func NewUsernamePasswordCredential(tenantID string, clientID string, username st if options == nil { options = &UsernamePasswordCredentialOptions{} } - c, err := getPublicClient(clientID, tenantID, &options.ClientOptions, public.WithInstanceDiscovery(!options.DisableInstanceDiscovery)) + opts := publicClientOptions{ + AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + Password: password, + Record: options.authenticationRecord, + TokenCachePersistenceOptions: options.tokenCachePersistenceOptions, + Username: username, + } + c, err := newPublicClient(tenantID, clientID, credNameUserPassword, opts) if err != nil { return nil, err } - upc := UsernamePasswordCredential{client: c, password: password, username: username} - upc.s = newSyncer(credNameUserPassword, tenantID, options.AdditionallyAllowedTenants, upc.requestToken, upc.silentAuth) - return &upc, nil + return &UsernamePasswordCredential{client: c}, err } -// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. -func (c *UsernamePasswordCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - return c.s.GetToken(ctx, opts) +// Authenticate the user. Subsequent calls to GetToken will automatically use the returned AuthenticationRecord. 
+func (c *UsernamePasswordCredential) authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (authenticationRecord, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameUserPassword+"."+traceOpAuthenticate, c.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + tk, err := c.client.Authenticate(ctx, opts) + return tk, err } -func (c *UsernamePasswordCredential) requestToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - ar, err := c.client.AcquireTokenByUsernamePassword(ctx, opts.Scopes, c.username, c.password, public.WithTenantID(opts.TenantID)) - if err == nil { - c.account = ar.Account - } - return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err -} - -func (c *UsernamePasswordCredential) silentAuth(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - ar, err := c.client.AcquireTokenSilent(ctx, opts.Scopes, - public.WithSilentAccount(c.account), - public.WithTenantID(opts.TenantID), - ) - return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +// GetToken requests an access token from Microsoft Entra ID. This method is called automatically by Azure SDK clients. +func (c *UsernamePasswordCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameUserPassword+"."+traceOpGetToken, c.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + tk, err := c.client.GetToken(ctx, opts) + return tk, err } var _ azcore.TokenCredential = (*UsernamePasswordCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go index 1a526b2e874dc..e8caeea71efa9 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go @@ -10,6 +10,9 @@ const ( // UserAgent is the string to be used in the user agent string when making requests. component = "azidentity" + // module is the fully qualified name of the module used in telemetry and distributed tracing. + module = "github.com/Azure/azure-sdk-for-go/sdk/" + component + // Version is the semantic version (see http://semver.org) of this module. - version = "v1.3.0" + version = "v1.5.1" ) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go index 7bfb3436760df..3e43e788e9312 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go @@ -15,6 +15,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" ) const credNameWorkloadIdentity = "WorkloadIdentityCredential" @@ -41,13 +42,13 @@ type WorkloadIdentityCredentialOptions struct { // ClientID of the service principal. Defaults to the value of the environment variable AZURE_CLIENT_ID. ClientID string // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or - // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // private clouds such as Azure Stack. 
It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making // the application responsible for ensuring the configured authority is valid and trustworthy. DisableInstanceDiscovery bool // TenantID of the service principal. Defaults to the value of the environment variable AZURE_TENANT_ID. TenantID string - // TokenFilePath is the path a file containing the workload identity token. Defaults to the value of the + // TokenFilePath is the path of a file containing a Kubernetes service account token. Defaults to the value of the // environment variable AZURE_FEDERATED_TOKEN_FILE. TokenFilePath string } @@ -88,14 +89,18 @@ func NewWorkloadIdentityCredential(options *WorkloadIdentityCredentialOptions) ( return nil, err } // we want "WorkloadIdentityCredential" in log messages, not "ClientAssertionCredential" - cred.s.name = credNameWorkloadIdentity + cred.client.name = credNameWorkloadIdentity w.cred = cred return &w, nil } -// GetToken requests an access token from Azure Active Directory. Azure SDK clients call this method automatically. +// GetToken requests an access token from Microsoft Entra ID. Azure SDK clients call this method automatically. func (w *WorkloadIdentityCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - return w.cred.GetToken(ctx, opts) + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameWorkloadIdentity+"."+traceOpGetToken, w.cred.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + tk, err := w.cred.GetToken(ctx, opts) + return tk, err } // getAssertion returns the specified file's content, which is expected to be a Kubernetes service account token. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go index ade7b348e3033..8ee66b52676ea 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go @@ -14,3 +14,33 @@ type NonRetriable interface { error NonRetriable() } + +// NonRetriableError marks the specified error as non-retriable. +// This function takes an error as input and returns a new error that is marked as non-retriable. +func NonRetriableError(err error) error { + return &nonRetriableError{err} +} + +// nonRetriableError is a struct that embeds the error interface. +// It is used to represent errors that should not be retried. +type nonRetriableError struct { + error +} + +// Error method for nonRetriableError struct. +// It returns the error message of the embedded error. +func (p *nonRetriableError) Error() string { + return p.error.Error() +} + +// NonRetriable is a marker method for nonRetriableError struct. +// Non-functional and indicates that the error is non-retriable. +func (*nonRetriableError) NonRetriable() { + // marker method +} + +// Unwrap method for nonRetriableError struct. +// It returns the original error that was marked as non-retriable. 
+func (p *nonRetriableError) Unwrap() error { + return p.error +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/exported/exported.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/exported/exported.go index d4ed6ccc8ad4a..9948f604b3012 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/exported/exported.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/exported/exported.go @@ -39,6 +39,11 @@ type PayloadOptions struct { // Subsequent reads will access the cached value. // Exported as runtime.Payload() WITHOUT the opts parameter. func Payload(resp *http.Response, opts *PayloadOptions) ([]byte, error) { + if resp.Body == nil { + // this shouldn't happen in real-world scenarios as a + // response with no body should set it to http.NoBody + return nil, nil + } modifyBytes := func(b []byte) []byte { return b } if opts != nil && opts.BytesModifier != nil { modifyBytes = opts.BytesModifier diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md index 2c2008679be81..3b66f7be95212 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md @@ -1,5 +1,187 @@ # Release History +## 1.3.0 (2024-02-12) + +### Bugs Fixed +* Fix concurrency issue while Downloading File. Fixes [#22156](https://github.com/Azure/azure-sdk-for-go/issues/22156). +* Fix panic when nil options bag is passed to NewGetPageRangesPager. Fixes [22356](https://github.com/Azure/azure-sdk-for-go/issues/22356). +* Fix file offset update after Download file. Fixes [#22297](https://github.com/Azure/azure-sdk-for-go/issues/22297). + +### Other Changes +* Updated the version of `azcore` to `1.9.2` + +## 1.3.0-beta.1 (2024-01-09) + +### Features Added + +* Updated service version to `2023-11-03`. +* Added support for Audience when OAuth is used. + +### Bugs Fixed + +* Block `SharedKeyCredential` authentication mode for non TLS protected endpoints. Fixes [#21841](https://github.com/Azure/azure-sdk-for-go/issues/21841). + +## 1.2.1 (2023-12-13) + +### Features Added + +* Exposed GetSASURL from specialized clients + +### Bugs Fixed + +* Fixed case in Blob Batch API when blob path has / in it. Fixes [#21649](https://github.com/Azure/azure-sdk-for-go/issues/21649). +* Fixed SharedKeyMissingError when using client.BlobClient().GetSASURL() method +* Fixed an issue that would cause metadata keys with empty values to be omitted when enumerating blobs. +* Fixed an issue where passing empty map to set blob tags API was causing panic. Fixes [#21869](https://github.com/Azure/azure-sdk-for-go/issues/21869). +* Fixed an issue where downloaded file has incorrect size when not a multiple of block size. Fixes [#21995](https://github.com/Azure/azure-sdk-for-go/issues/21995). +* Fixed case where `io.ErrUnexpectedEOF` was treated as expected error in `UploadStream`. Fixes [#21837](https://github.com/Azure/azure-sdk-for-go/issues/21837). + +### Other Changes + +* Updated the version of `azcore` to `1.9.1` and `azidentity` to `1.4.0`. + +## 1.2.0 (2023-10-11) + +### Bugs Fixed +* Fixed null pointer exception when `SetImmutabilityPolicyOptions` is passed as `nil`. 
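Circling back to the `errorinfo.NonRetriableError` helper added earlier in this patch, here is a hedged sketch of how generic retry logic might consume the `NonRetriable` marker interface; the `shouldRetry` helper is illustrative only and not part of the SDK:

```go
// Illustrative only: errorinfo.NonRetriableError (added above) wraps an error so
// retry helpers can detect it via the exported NonRetriable marker interface.
package main

import (
	"errors"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo"
)

func shouldRetry(err error) bool {
	var nr errorinfo.NonRetriable
	return !errors.As(err, &nr) // anything marked non-retriable is not retried
}

func main() {
	err := errorinfo.NonRetriableError(errors.New("bad credentials"))
	fmt.Println(shouldRetry(err)) // false
}
```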
+ +## 1.2.0-beta.1 (2023-09-18) + +### Features Added +* Added support for service version 2020-12-06, 2021-02-12, 2021-04-10, 2021-06-08, 2021-08-06 , 2021-10-04, 2021-12-02, 2022-11-02, 2023-01-03, 2023-05-03, and 2023-08-03 +* Added support for [Cold Tier](https://learn.microsoft.com/azure/storage/blobs/access-tiers-overview?tabs=azure-portal). +* Added `CopySourceTag` option for `UploadBlobFromURLOptions` +* Added [FilterBlobs by Tags](https://learn.microsoft.com/rest/api/storageservices/find-blobs-by-tags-container) API for container client. +* Added `System` option to `ListContainersInclude` to allow listing of system containers (i.e, $web). +* Updated the SAS Version to `2021-12-02` and added `Encryption Scope` to Account SAS, Service SAS, and User Delegation SAS +* Added `ArchiveStatusRehydratePendingToCold` value to `ArchiveStatus` enum. +* Content length limit for `AppendBlob.AppendBlock()` and `AppendBlob.AppendBlockFromURL()` raised from 4 MB to 100 MB. + +### Bugs Fixed +* Fixed issue where some requests fail with mismatch in string to sign. +* Fixed service SAS creation where expiry time or permissions can be omitted when stored access policy is used. Fixes [#21229](https://github.com/Azure/azure-sdk-for-go/issues/21229). + +### Other Changes +* Updating version of azcore to 1.6.0. + +## 1.1.0 (2023-07-13) + +### Features Added + +* Added [Blob Batch API](https://learn.microsoft.com/rest/api/storageservices/blob-batch). +* Added support for bearer challenge for identity based managed disks. +* Added support for GetAccountInfo to container and blob level clients. +* Added [UploadBlobFromURL API](https://learn.microsoft.com/rest/api/storageservices/put-blob-from-url). +* Added support for CopySourceAuthorization to appendblob.AppendBlockFromURL +* Added support for tag permission in Container SAS. + +### Bugs Fixed + +* Fixed time formatting for the conditional request headers. Fixes [#20475](https://github.com/Azure/azure-sdk-for-go/issues/20475). +* Fixed an issue where passing a blob tags map of length 0 would result in the x-ms-tags header to be sent to the service with an empty string as value. +* Fixed block size and number of blocks calculation in `UploadBuffer` and `UploadFile`. Fixes [#20735](https://github.com/Azure/azure-sdk-for-go/issues/20735). + +### Other Changes + +* Add `dragonfly` to the list of build constraints for `blockblob`. +* Updating version of azcore to 1.6.0 and azidentity to 1.3.0 + +## 1.1.0-beta.1 (2023-05-09) + +### Features Added + +* Added [Blob Batch API](https://learn.microsoft.com/rest/api/storageservices/blob-batch). +* Added support for bearer challenge for identity based managed disks. +* Added support for GetAccountInfo to container and blob level clients. +* Added [UploadBlobFromURL API](https://learn.microsoft.com/rest/api/storageservices/put-blob-from-url). +* Added support for CopySourceAuthorization to appendblob.AppendBlockFromURL +* Added support for tag permission in Container SAS. + +### Bugs Fixed + +* Fixed time formatting for the conditional request headers. Fixes [#20475](https://github.com/Azure/azure-sdk-for-go/issues/20475). +* Fixed an issue where passing a blob tags map of length 0 would result in the x-ms-tags header to be sent to the service with an empty string as value. + +## 1.0.0 (2023-02-07) + +### Features Added + +* Add support to log calculated block size and count during uploads +* Added MissingSharedKeyCredential error type for cleaner UX. 
Related to [#19864](https://github.com/Azure/azure-sdk-for-go/issues/19864). + +### Breaking Changes + +* Changed API signatures to map correctly to Azure Storage REST APIs, These changes impact: + * `blob.GetSASURL()` + * `blockblob.StageBlockFromURL()` + * `container.SetAccessPolicy()` + * `container.GetSASURL()` + * `service.GetSASURL()` + * `service.FilterBlobs()` + * `lease.AcquireLease()` (blobs and containers) + * `lease.ChangeLease()` (blobs and containers) +* Type name changes: + * `CpkInfo` -> `CPKInfo` + * `CpkScopeInfo` -> `CPKScopeInfo` + * `RuleId` -> `RuleID` + * `PolicyId` -> `PolicyID` + * `CorsRule` -> `CORSRule` +* Remove `AccountServices` it is now hardcoded to blobs + +### Bugs Fixed + +* Fixed encoding issues seen in FilterBlobs. Fixes [#17421](https://github.com/Azure/azure-sdk-for-go/issues/17421). +* Fixing inconsistency seen with Metadata and ORS response. Fixes [#19688](https://github.com/Azure/azure-sdk-for-go/issues/19688). +* Fixed endless loop during pagination issue [#19773](https://github.com/Azure/azure-sdk-for-go/pull/19773). + +### Other Changes + +* Exported some missing types in the `blob`, `container` and `service` packages. Fixes [#19775](https://github.com/Azure/azure-sdk-for-go/issues/19775). +* SAS changes [#19781](https://github.com/Azure/azure-sdk-for-go/pull/19781): + * AccountSASPermissions: SetImmutabilityPolicy support + * ContainerSASPermissions: Move support + * Validations to ensure correct sas perm ordering + +## 0.6.1 (2022-12-09) + +### Bugs Fixed + +* Fix compilation error on Darwin. + +## 0.6.0 (2022-12-08) + +### Features Added + +* Added BlobDeleteType to DeleteOptions to allow access to ['Permanent'](https://learn.microsoft.com/rest/api/storageservices/delete-blob#permanent-delete) DeleteType. +* Added [Set Blob Expiry API](https://learn.microsoft.com/rest/api/storageservices/set-blob-expiry). +* Added method `ServiceClient()` to the `azblob.Client` type, allowing access to the underlying service client. +* Added support for object level immutability policy with versioning (Version Level WORM). +* Added the custom CRC64 polynomial used by storage for transactional hashes, and implemented automatic hashing for transactions. + +### Breaking Changes + +* Corrected the name for `saoid` and `suoid` SAS parameters in `BlobSignatureValues` struct as per [this](https://learn.microsoft.com/rest/api/storageservices/create-user-delegation-sas#construct-a-user-delegation-sas) +* Updated type of `BlockSize` from int to int64 in `UploadStreamOptions` +* CRC64 transactional hashes are now supplied with a `uint64` rather than a `[]byte` to conform with Golang's `hash/crc64` package +* Field `XMSContentCRC64` has been renamed to `ContentCRC64` +* The `Lease*` constant types and values in the `blob` and `container` packages have been moved to the `lease` package and their names fixed up to avoid stuttering. +* Fields `TransactionalContentCRC64` and `TransactionalContentMD5` have been replaced by `TransactionalValidation`. +* Fields `SourceContentCRC64` and `SourceContentMD5` have been replaced by `SourceContentValidation`. +* Field `TransactionalContentMD5` has been removed from type `AppendBlockFromURLOptions`. + +### Bugs Fixed + +* Corrected signing of User Delegation SAS. 
Fixes [#19372](https://github.com/Azure/azure-sdk-for-go/issues/19372) and [#19454](https://github.com/Azure/azure-sdk-for-go/issues/19454) +* Added formatting of start and expiry time in [SetAccessPolicy](https://learn.microsoft.com/rest/api/storageservices/set-container-acl#request-body). Fixes [#18712](https://github.com/Azure/azure-sdk-for-go/issues/18712) +* Uploading block blobs larger than 256MB can fail in some cases with error `net/http: HTTP/1.x transport connection broken`. +* Blob name parameters are URL-encoded before constructing the complete blob URL. + +### Other Changes + +* Added some missing public surface area in the `container` and `service` packages. +* The `UploadStream()` methods now use anonymous memory mapped files for buffers in order to reduce heap allocations/fragmentation. + * The anonymous memory mapped files are typically backed by the page/swap file, multiple files are not actually created. + ## 0.5.1 (2022-10-11) ### Bugs Fixed diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md index 467fe36cd90b8..1f51959fa3d6a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md @@ -1,47 +1,51 @@ -# Azure Blob Storage SDK for Go +# Azure Blob Storage module for Go -> Server Version: 2020-10-02 +> Service Version: 2023-11-03 -Azure Blob storage is Microsoft's object storage solution for the cloud. Blob -storage is optimized for storing massive amounts of unstructured data. -Unstructured data is data that does not adhere to a particular data model or -definition, such as text or binary data. +Azure Blob Storage is Microsoft's object storage solution for the cloud. Blob +Storage is optimized for storing massive amounts of unstructured data - data that does not adhere to a particular data model or +definition, such as text or binary data. For more information, see [Introduction to Azure Blob Storage](https://learn.microsoft.com/azure/storage/blobs/storage-blobs-introduction). -[Source code][source] | [API reference documentation][docs] | [REST API documentation][rest_docs] | [Product documentation][product_docs] +Use the Azure Blob Storage client module `github.com/Azure/azure-sdk-for-go/sdk/storage/azblob` to: -## Getting started - -### Install the package +* Authenticate clients with Azure Blob Storage +* Manipulate containers and blobs in an Azure storage account -Install the Azure Blob Storage SDK for Go with [go get][goget]: +Key links: -```Powershell -go get github.com/Azure/azure-sdk-for-go/sdk/storage/azblob -``` +[Source code][source] | [API reference documentation][docs] | [REST API documentation][rest_docs] | [Product documentation][product_docs] | [Samples][go_samples] -If you're going to authenticate with Azure Active Directory (recommended), install the [azidentity][azidentity] module. -```Powershell -go get github.com/Azure/azure-sdk-for-go/sdk/azidentity -``` +## Getting started ### Prerequisites -A supported [Go][godevdl] version (the Azure SDK supports the two most recent Go releases). - -You need an [Azure subscription][azure_sub] and a -[Storage Account][storage_account_docs] to use this package. 
- -To create a new Storage Account, you can use the [Azure Portal][storage_account_create_portal], +- Go, version 1.18 or higher - [Install Go](https://go.dev/doc/install) +- Azure subscription - [Create a free account](https://azure.microsoft.com/free/) +- Azure storage account - To create a storage account, use tools including the [Azure portal][storage_account_create_portal], [Azure PowerShell][storage_account_create_ps], or the [Azure CLI][storage_account_create_cli]. Here's an example using the Azure CLI: -```Powershell +```bash az storage account create --name MyStorageAccount --resource-group MyResourceGroup --location westus --sku Standard_LRS ``` +### Install the package + +Install the Azure Blob Storage client module for Go with [go get][goget]: + +```bash +go get github.com/Azure/azure-sdk-for-go/sdk/storage/azblob +``` + +If you plan to authenticate with Azure Active Directory (recommended), also install the [azidentity][azidentity] module. + +```bash +go get github.com/Azure/azure-sdk-for-go/sdk/azidentity +``` + ### Authenticate the client -In order to interact with the Azure Blob Storage service, you'll need to create an instance of the `azblob.Client` type. The [azidentity][azidentity] module makes it easy to add Azure Active Directory support for authenticating Azure SDK clients with their corresponding Azure services. +To interact with the Azure Blob Storage service, you'll need to create an instance of the `azblob.Client` type. The [azidentity][azidentity] module makes it easy to add Azure Active Directory support for authenticating Azure SDK clients with their corresponding Azure services. ```go // create a credential for authenticating with Azure Active Directory @@ -53,11 +57,17 @@ client, err := azblob.NewClient("https://MYSTORAGEACCOUNT.blob.core.windows.net/ // TODO: handle err ``` -Learn more about enabling Azure Active Directory for authentication with Azure Storage in [our documentation][storage_ad] and [our samples](#next-steps). +Learn more about enabling Azure Active Directory for authentication with Azure Storage: + +* [Authorize access to blobs using Azure Active Directory][storage_ad] + +Other options for authentication include connection strings, shared key, shared access signatures (SAS), and anonymous public access. Use the appropriate client constructor function for the authentication mechanism you wish to use. For examples, see: + +* [Blob samples][samples] ## Key concepts -Blob storage is designed for: +Blob Storage is designed for: - Serving images or documents directly to a browser. - Storing files for distributed access. @@ -66,23 +76,41 @@ Blob storage is designed for: - Storing data for backup and restore, disaster recovery, and archiving. - Storing data for analysis by an on-premises or Azure-hosted service. -Blob storage offers three types of resources: +Blob Storage offers three types of resources: - The _storage account_ - One or more _containers_ in a storage account -- One ore more _blobs_ in a container +- One or more _blobs_ in a container Instances of the `azblob.Client` type provide methods for manipulating containers and blobs within a storage account. The storage account is specified when the `azblob.Client` is constructed. -Use the appropriate client constructor function for the authentication mechanism you wish to use. 
-Learn more about options for authentication _(including Connection Strings, Shared Key, Shared Access Signatures (SAS), Azure Active Directory (AAD), and anonymous public access)_ [in our examples.](https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/storage/azblob/examples_test.go) +### Specialized clients + +The Azure Blob Storage client module for Go also provides specialized clients in various subpackages. Use these clients when you need to interact with a specific kind of blob. Learn more about [block blobs, append blobs, and page blobs](https://learn.microsoft.com/rest/api/storageservices/understanding-block-blobs--append-blobs--and-page-blobs). + +- [appendblob][append_blob] +- [blockblob][block_blob] +- [pageblob][page_blob] + +The [blob][blob] package contains APIs common to all blob types. This includes APIs for deleting and undeleting a blob, setting metadata, and more. + +The [lease][lease] package contains clients for managing leases on blobs and containers. See the [REST API reference](https://learn.microsoft.com/rest/api/storageservices/lease-blob#remarks) for general information on leases. + +The [container][container] package contains APIs specific to containers. This includes APIs for setting access policies or properties, and more. + +The [service][service] package contains APIs specific to the Blob service. This includes APIs for manipulating containers, retrieving account information, and more. + +The [sas][sas] package contains utilities to aid in the creation and manipulation of shared access signature (SAS) tokens. +See the package's documentation for more information. ### Goroutine safety -We guarantee that all client instance methods are goroutine-safe and independent of each other ([guideline](https://azure.github.io/azure-sdk/golang_introduction.html#thread-safety)). This ensures that the recommendation of reusing client instances is always safe, even across goroutines. -### About blob metadata -Blob metadata name/value pairs are valid HTTP headers and should adhere to all restrictions governing HTTP headers. Metadata names must be valid HTTP header names, may contain only ASCII characters, and should be treated as case-insensitive. Base64-encode or URL-encode metadata values containing non-ASCII characters. +We guarantee that all client instance methods are goroutine-safe and independent of each other (see [guideline](https://azure.github.io/azure-sdk/golang_introduction.html#thread-safety)). This ensures that the recommendation to reuse client instances is always safe, even across goroutines. + +### Blob metadata + +Blob metadata name-value pairs are valid HTTP headers and should adhere to all restrictions governing HTTP headers. Metadata names must be valid HTTP header names, may contain only ASCII characters, and should be treated as case-insensitive. Base64-encode or URL-encode metadata values containing non-ASCII characters. 
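As a minimal sketch of the encoding guidance above (illustrative, not part of the upstream README), this assumes an existing `*azblob.Client` named `client` and an existing container `samples` containing a blob `cloud.jpg`; both names are placeholders:

```go
// URL-encode a non-ASCII metadata value so the resulting header value is valid ASCII.
import (
	"context"
	"net/url"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
)

func setEncodedMetadata(ctx context.Context, client *azblob.Client) error {
	blobClient := client.ServiceClient().NewContainerClient("samples").NewBlobClient("cloud.jpg")
	metadata := map[string]*string{
		"category": to.Ptr("images"),                        // ASCII value, used as-is
		"caption":  to.Ptr(url.QueryEscape("café at dusk")), // non-ASCII value, URL-encoded
	}
	_, err := blobClient.SetMetadata(ctx, metadata, nil)
	return err
}
```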
### Additional concepts @@ -94,7 +122,7 @@ Blob metadata name/value pairs are valid HTTP headers and should adhere to all r ## Examples -### Uploading a blob +### Upload a blob ```go const ( @@ -122,7 +150,7 @@ _, err = client.UploadFile(context.TODO(), containerName, blobName, file, nil) // TODO: handle error ``` -### Downloading a blob +### Download a blob ```go // this example accesses a public blob via anonymous access, so no credentials are required @@ -139,7 +167,7 @@ _, err = client.DownloadFile(context.TODO(), "samples", "cloud.jpg", file, nil) // TODO: handle error ``` -### Enumerating blobs +### Enumerate blobs ```go const ( @@ -177,7 +205,7 @@ All Blob service operations will return an [*azcore.ResponseError][azcore_response_error] on failure with a populated `ErrorCode` field. Many of these errors are recoverable. The [bloberror][blob_error] package provides the possible Storage error codes -along with various helper facilities for error handling. +along with helper facilities for error handling. ```go const ( @@ -201,28 +229,7 @@ if bloberror.HasCode(err, bloberror.ContainerBeingDeleted, bloberror.ContainerNo ## Next steps -Get started with our [Blob samples][samples]. They contain complete examples of the above snippets and more. - -### Specialized clients - -The Azure Blob Storage SDK for Go also provides specialized clients in various subpackages. -Use these clients when you need to interact with a specific kind of blob. -Learn more about the various types of blobs from the following links. - -- [appendblob][append_blob] - [REST docs](https://docs.microsoft.com/rest/api/storageservices/understanding-block-blobs--append-blobs--and-page-blobs#about-append-blobs) -- [blockblob][block_blob] - [REST docs](https://docs.microsoft.com/rest/api/storageservices/understanding-block-blobs--append-blobs--and-page-blobs#about-block-blobs) -- [pageblob][page_blob] - [REST docs](https://docs.microsoft.com/rest/api/storageservices/understanding-block-blobs--append-blobs--and-page-blobs#about-page-blobs) - -The [blob][blob] package contains APIs common to all blob types. This includes APIs for deleting and undeleting a blob, setting metadata, and more. - -The [lease][lease] package contains clients for managing leases on blobs and containers. Please see the [reference docs](https://docs.microsoft.com/rest/api/storageservices/lease-blob#remarks) for general information on leases. - -The [container][container] package contains APIs specific to containers. This includes APIs setting access policies or properties, and more. - -The [service][service] package contains APIs specific to blob service. This includes APIs for manipulating containers, retrieving account information, and more. - -The [sas][sas] package contains utilities to aid in the creation and manipulation of Shared Access Signature tokens. -See the package's documentation for more information. +Get started with our [Blob samples][samples]. They contain complete examples of the above snippets and more. ## Contributing @@ -243,19 +250,20 @@ additional questions or comments. 
[source]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob -[docs]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob -[rest_docs]: https://docs.microsoft.com/rest/api/storageservices/blob-service-rest-api -[product_docs]: https://docs.microsoft.com/azure/storage/blobs/storage-blobs-overview +[docs]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob#section_documentation +[rest_docs]: https://learn.microsoft.com/rest/api/storageservices/blob-service-rest-api +[product_docs]: https://learn.microsoft.com/azure/storage/blobs/storage-blobs-overview [godevdl]: https://go.dev/dl/ [goget]: https://pkg.go.dev/cmd/go#hdr-Add_dependencies_to_current_module_and_install_them -[storage_account_docs]: https://docs.microsoft.com/azure/storage/common/storage-account-overview -[storage_account_create_ps]: https://docs.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-powershell -[storage_account_create_cli]: https://docs.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-cli -[storage_account_create_portal]: https://docs.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-portal -[azure_cli]: https://docs.microsoft.com/cli/azure +[go_samples]: https://github.com/Azure-Samples/azure-sdk-for-go-samples/tree/main +[storage_account_docs]: https://learn.microsoft.com/azure/storage/common/storage-account-overview +[storage_account_create_ps]: https://learn.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-powershell +[storage_account_create_cli]: https://learn.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-cli +[storage_account_create_portal]: https://learn.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-portal +[azure_cli]: https://learn.microsoft.com/cli/azure [azure_sub]: https://azure.microsoft.com/free/ [azidentity]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity -[storage_ad]: https://docs.microsoft.com/azure/storage/common/storage-auth-aad +[storage_ad]: https://learn.microsoft.com/azure/storage/common/storage-auth-aad [azcore_response_error]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore#ResponseError [samples]: https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/storage/azblob/examples_test.go [append_blob]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/appendblob/client.go diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go index a6c204ac66180..a62abfdc0c21c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go @@ -8,8 +8,12 @@ package appendblob import ( "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" "io" "os" + "time" "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" @@ -21,9 +25,7 @@ import ( ) // ClientOptions contains the optional parameters when creating a Client. 
-type ClientOptions struct { - azcore.ClientOptions -} +type ClientOptions base.ClientOptions // Client represents a client to an Azure Storage append blob; type Client base.CompositeClient[generated.BlobClient, generated.AppendBlobClient] @@ -33,12 +35,17 @@ type Client base.CompositeClient[generated.BlobClient, generated.AppendBlobClien // - cred - an Azure AD credential, typically obtained via the azidentity module // - options - client options; pass nil to accept the default values func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { - authPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil) + audience := base.GetAudience((*base.ClientOptions)(options)) + authPolicy := shared.NewStorageChallengePolicy(cred, audience) conOptions := shared.GetClientOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} + + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } - return (*Client)(base.NewAppendBlobClient(blobURL, pl, nil)), nil + return (*Client)(base.NewAppendBlobClient(blobURL, azClient, nil)), nil } // NewClientWithNoCredential creates an instance of Client with the specified values. @@ -47,9 +54,13 @@ func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptio // - options - client options; pass nil to accept the default values func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) { conOptions := shared.GetClientOptions(options) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) - return (*Client)(base.NewAppendBlobClient(blobURL, pl, nil)), nil + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + + return (*Client)(base.NewAppendBlobClient(blobURL, azClient, nil)), nil } // NewClientWithSharedKeyCredential creates an instance of Client with the specified values. @@ -59,10 +70,14 @@ func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, func NewClientWithSharedKeyCredential(blobURL string, cred *blob.SharedKeyCredential, options *ClientOptions) (*Client, error) { authPolicy := exported.NewSharedKeyCredPolicy(cred) conOptions := shared.GetClientOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - return (*Client)(base.NewAppendBlobClient(blobURL, pl, cred)), nil + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + + return (*Client)(base.NewAppendBlobClient(blobURL, azClient, cred)), nil } // NewClientFromConnectionString creates an instance of Client with the specified values. 
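For reviewers, a hedged usage sketch of the constructor path changed in the hunk above (not part of the vendored file): a caller builds an append blob client with a token credential, and `NewClient` wires the storage challenge policy into the pipeline. The account URL, container, and blob names are placeholders.

```go
package main

import (
	"bytes"
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		panic(err)
	}
	// Placeholder URL; assumes the container already exists.
	client, err := appendblob.NewClient("https://MYSTORAGEACCOUNT.blob.core.windows.net/mycontainer/log.txt", cred, nil)
	if err != nil {
		panic(err)
	}
	ctx := context.Background()
	// Create the 0-size append blob, then append a block of data to it.
	if _, err = client.Create(ctx, nil); err != nil {
		panic(err)
	}
	body := streaming.NopCloser(bytes.NewReader([]byte("hello append blob\n")))
	if _, err = client.AppendBlock(ctx, body, nil); err != nil {
		panic(err)
	}
}
```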
@@ -103,6 +118,11 @@ func (ab *Client) generated() *generated.AppendBlobClient { return appendBlob } +func (ab *Client) innerBlobGenerated() *generated.BlobClient { + b := ab.BlobClient() + return base.InnerClient((*base.Client[generated.BlobClient])(b)) +} + // URL returns the URL endpoint used by the Client object. func (ab *Client) URL() string { return ab.generated().Endpoint() @@ -117,7 +137,7 @@ func (ab *Client) WithSnapshot(snapshot string) (*Client, error) { } p.Snapshot = snapshot - return (*Client)(base.NewAppendBlobClient(p.String(), ab.generated().Pipeline(), ab.sharedKey())), nil + return (*Client)(base.NewAppendBlobClient(p.String(), ab.generated().InternalClient(), ab.sharedKey())), nil } // WithVersionID creates a new AppendBlobURL object identical to the source but with the specified version id. @@ -129,7 +149,7 @@ func (ab *Client) WithVersionID(versionID string) (*Client, error) { } p.VersionID = versionID - return (*Client)(base.NewAppendBlobClient(p.String(), ab.generated().Pipeline(), ab.sharedKey())), nil + return (*Client)(base.NewAppendBlobClient(p.String(), ab.generated().InternalClient(), ab.sharedKey())), nil } // Create creates a 0-size append blob. Call AppendBlock to append data to an append blob. @@ -153,7 +173,22 @@ func (ab *Client) AppendBlock(ctx context.Context, body io.ReadSeekCloser, o *Ap appendOptions, appendPositionAccessConditions, cpkInfo, cpkScope, modifiedAccessConditions, leaseAccessConditions := o.format() - resp, err := ab.generated().AppendBlock(ctx, count, body, appendOptions, leaseAccessConditions, appendPositionAccessConditions, cpkInfo, cpkScope, modifiedAccessConditions) + if o != nil && o.TransactionalValidation != nil { + body, err = o.TransactionalValidation.Apply(body, appendOptions) + if err != nil { + return AppendBlockResponse{}, nil + } + } + + resp, err := ab.generated().AppendBlock(ctx, + count, + body, + appendOptions, + leaseAccessConditions, + appendPositionAccessConditions, + cpkInfo, + cpkScope, + modifiedAccessConditions) return resp, err } @@ -161,11 +196,25 @@ func (ab *Client) AppendBlock(ctx context.Context, body io.ReadSeekCloser, o *Ap // AppendBlockFromURL copies a new block of data from source URL to the end of the existing append blob. // For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block-from-url. func (ab *Client) AppendBlockFromURL(ctx context.Context, source string, o *AppendBlockFromURLOptions) (AppendBlockFromURLResponse, error) { - appendBlockFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, appendPositionAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions := o.format() + appendBlockFromURLOptions, + cpkInfo, + cpkScopeInfo, + leaseAccessConditions, + appendPositionAccessConditions, + modifiedAccessConditions, + sourceModifiedAccessConditions := o.format() // content length should be 0 on * from URL. always. It's a 400 if it isn't. 
- resp, err := ab.generated().AppendBlockFromURL(ctx, source, 0, appendBlockFromURLOptions, cpkInfo, cpkScopeInfo, - leaseAccessConditions, appendPositionAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions) + resp, err := ab.generated().AppendBlockFromURL(ctx, + source, + 0, + appendBlockFromURLOptions, + cpkInfo, + cpkScopeInfo, + leaseAccessConditions, + appendPositionAccessConditions, + modifiedAccessConditions, + sourceModifiedAccessConditions) return resp, err } @@ -173,7 +222,11 @@ func (ab *Client) AppendBlockFromURL(ctx context.Context, source string, o *Appe // https://docs.microsoft.com/en-us/rest/api/storageservices/append-blob-seal func (ab *Client) Seal(ctx context.Context, o *SealOptions) (SealResponse, error) { leaseAccessConditions, modifiedAccessConditions, positionAccessConditions := o.format() - resp, err := ab.generated().Seal(ctx, nil, leaseAccessConditions, modifiedAccessConditions, positionAccessConditions) + resp, err := ab.generated().Seal(ctx, + nil, + leaseAccessConditions, + modifiedAccessConditions, + positionAccessConditions) return resp, err } @@ -190,14 +243,39 @@ func (ab *Client) Undelete(ctx context.Context, o *blob.UndeleteOptions) (blob.U return ab.BlobClient().Undelete(ctx, o) } -// SetTier operation sets the tier on a blob. The operation is allowed on a page -// blob in a premium storage account and on a block blob in a blob storage account (locally -// redundant storage only). A premium page blob's tier determines the allowed size, IOPS, and -// bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation -// does not update the blob's ETag. -// For detailed information about block blob level tiering see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers. +// SetImmutabilityPolicy operation enables users to set the immutability policy on a blob. +// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview +func (ab *Client) SetImmutabilityPolicy(ctx context.Context, expiryTime time.Time, options *blob.SetImmutabilityPolicyOptions) (blob.SetImmutabilityPolicyResponse, error) { + return ab.BlobClient().SetImmutabilityPolicy(ctx, expiryTime, options) +} + +// DeleteImmutabilityPolicy operation enables users to delete the immutability policy on a blob. +// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview +func (ab *Client) DeleteImmutabilityPolicy(ctx context.Context, options *blob.DeleteImmutabilityPolicyOptions) (blob.DeleteImmutabilityPolicyResponse, error) { + return ab.BlobClient().DeleteImmutabilityPolicy(ctx, options) +} + +// SetLegalHold operation enables users to set legal hold on a blob. +// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview +func (ab *Client) SetLegalHold(ctx context.Context, legalHold bool, options *blob.SetLegalHoldOptions) (blob.SetLegalHoldResponse, error) { + return ab.BlobClient().SetLegalHold(ctx, legalHold, options) +} + +// SetTier +// Deprecated: SetTier only works for page blob in premium storage account and block blob in blob storage account. func (ab *Client) SetTier(ctx context.Context, tier blob.AccessTier, o *blob.SetTierOptions) (blob.SetTierResponse, error) { - return ab.BlobClient().SetTier(ctx, tier, o) + return blob.SetTierResponse{}, errors.New("operation will not work on this blob type. 
SetTier only works for page blob in premium storage account and block blob in blob storage account") +} + +// SetExpiry operation sets an expiry time on an existing blob. This operation is only allowed on Hierarchical Namespace enabled accounts. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/set-blob-expiry +func (ab *Client) SetExpiry(ctx context.Context, expiryType ExpiryType, o *SetExpiryOptions) (SetExpiryResponse, error) { + if expiryType == nil { + expiryType = ExpiryTypeNever{} + } + et, opts := expiryType.Format(o) + resp, err := ab.innerBlobGenerated().SetExpiry(ctx, et, opts) + return resp, err } // GetProperties returns the blob's properties. @@ -206,6 +284,12 @@ func (ab *Client) GetProperties(ctx context.Context, o *blob.GetPropertiesOption return ab.BlobClient().GetProperties(ctx, o) } +// GetAccountInfo provides account level information +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/get-account-information?tabs=shared-access-signatures. +func (ab *Client) GetAccountInfo(ctx context.Context, o *blob.GetAccountInfoOptions) (blob.GetAccountInfoResponse, error) { + return ab.BlobClient().GetAccountInfo(ctx, o) +} + // SetHTTPHeaders changes a blob's HTTP headers. // For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. func (ab *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders blob.HTTPHeaders, o *blob.SetHTTPHeadersOptions) (blob.SetHTTPHeadersResponse, error) { @@ -214,7 +298,7 @@ func (ab *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders blob.HTTPHeade // SetMetadata changes a blob's metadata. // https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata. -func (ab *Client) SetMetadata(ctx context.Context, metadata map[string]string, o *blob.SetMetadataOptions) (blob.SetMetadataResponse, error) { +func (ab *Client) SetMetadata(ctx context.Context, metadata map[string]*string, o *blob.SetMetadataOptions) (blob.SetMetadataResponse, error) { return ab.BlobClient().SetMetadata(ctx, metadata, o) } @@ -250,10 +334,16 @@ func (ab *Client) GetTags(ctx context.Context, o *blob.GetTagsOptions) (blob.Get return ab.BlobClient().GetTags(ctx, o) } -// CopyFromURL synchronously copies the data at the source URL to a block blob, with sizes up to 256 MB. -// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url. +// CopyFromURL +// Deprecated: CopyFromURL works only with block blob func (ab *Client) CopyFromURL(ctx context.Context, copySource string, o *blob.CopyFromURLOptions) (blob.CopyFromURLResponse, error) { - return ab.BlobClient().CopyFromURL(ctx, copySource, o) + return blob.CopyFromURLResponse{}, errors.New("operation will not work on this blob type. CopyFromURL works only with block blob") +} + +// GetSASURL is a convenience method for generating a SAS token for the currently pointed at append blob. +// It can only be used if the credential supplied during creation was a SharedKeyCredential. 
+func (ab *Client) GetSASURL(permissions sas.BlobPermissions, expiry time.Time, o *blob.GetSASURLOptions) (string, error) { + return ab.BlobClient().GetSASURL(permissions, expiry, o) } // Concurrent Download Functions ----------------------------------------------------------------------------------------- diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/models.go index 69faf6ad6546d..0834743f0c632 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/models.go @@ -37,9 +37,9 @@ type CreateOptions struct { HTTPHeaders *blob.HTTPHeaders - CpkInfo *blob.CpkInfo + CPKInfo *blob.CPKInfo - CpkScopeInfo *blob.CpkScopeInfo + CPKScopeInfo *blob.CPKScopeInfo // Optional. Used to set blob tags in various blob operations. Tags map[string]string @@ -49,10 +49,10 @@ type CreateOptions struct { // are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source // blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. // See Naming and Referencing Containers, Blobs, and Metadata for more information. - Metadata map[string]string + Metadata map[string]*string } -func (o *CreateOptions) format() (*generated.AppendBlobClientCreateOptions, *generated.BlobHTTPHeaders, *generated.LeaseAccessConditions, *generated.CpkInfo, *generated.CpkScopeInfo, *generated.ModifiedAccessConditions) { +func (o *CreateOptions) format() (*generated.AppendBlobClientCreateOptions, *generated.BlobHTTPHeaders, *generated.LeaseAccessConditions, *generated.CPKInfo, *generated.CPKScopeInfo, *generated.ModifiedAccessConditions) { if o == nil { return nil, nil, nil, nil, nil, nil } @@ -66,57 +66,51 @@ func (o *CreateOptions) format() (*generated.AppendBlobClientCreateOptions, *gen } leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) - return &options, o.HTTPHeaders, leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions + return &options, o.HTTPHeaders, leaseAccessConditions, o.CPKInfo, o.CPKScopeInfo, modifiedAccessConditions } // --------------------------------------------------------------------------------------------------------------------- // AppendBlockOptions contains the optional parameters for the Client.AppendBlock method. type AppendBlockOptions struct { - // Specify the transactional crc64 for the body, to be validated by the service. - TransactionalContentCRC64 []byte - // Specify the transactional md5 for the body, to be validated by the service. - TransactionalContentMD5 []byte + // TransactionalValidation specifies the transfer validation type to use. + // The default is nil (no transfer validation). 
+ TransactionalValidation blob.TransferValidationType AppendPositionAccessConditions *AppendPositionAccessConditions - CpkInfo *blob.CpkInfo + CPKInfo *blob.CPKInfo - CpkScopeInfo *blob.CpkScopeInfo + CPKScopeInfo *blob.CPKScopeInfo AccessConditions *blob.AccessConditions } func (o *AppendBlockOptions) format() (*generated.AppendBlobClientAppendBlockOptions, *generated.AppendPositionAccessConditions, - *generated.CpkInfo, *generated.CpkScopeInfo, *generated.ModifiedAccessConditions, *generated.LeaseAccessConditions) { + *generated.CPKInfo, *generated.CPKScopeInfo, *generated.ModifiedAccessConditions, *generated.LeaseAccessConditions) { if o == nil { return nil, nil, nil, nil, nil, nil } - options := &generated.AppendBlobClientAppendBlockOptions{ - TransactionalContentCRC64: o.TransactionalContentCRC64, - TransactionalContentMD5: o.TransactionalContentMD5, - } leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) - return options, o.AppendPositionAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions, leaseAccessConditions + return &generated.AppendBlobClientAppendBlockOptions{}, o.AppendPositionAccessConditions, o.CPKInfo, o.CPKScopeInfo, modifiedAccessConditions, leaseAccessConditions } // --------------------------------------------------------------------------------------------------------------------- // AppendBlockFromURLOptions contains the optional parameters for the Client.AppendBlockFromURL method. type AppendBlockFromURLOptions struct { - // Specify the md5 calculated for the range of bytes that must be read from the copy source. - SourceContentMD5 []byte - // Specify the crc64 calculated for the range of bytes that must be read from the copy source. - SourceContentCRC64 []byte - // Specify the transactional md5 for the body, to be validated by the service. - TransactionalContentMD5 []byte + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + + // SourceContentValidation contains the validation mechanism used on the range of bytes read from the source. 
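AppendBlockFromURL gains CopySourceAuthorization and a SourceContentValidation hook. A minimal sketch, where sourceURL, bearerToken and srcCRC64 are placeholders supplied by the caller:

// Assumed imports: "context",
//   "github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
//   "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob"
//   "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
func appendFromURL(ctx context.Context, ab *appendblob.Client, sourceURL, bearerToken string, srcCRC64 []byte) error {
	_, err := ab.AppendBlockFromURL(ctx, sourceURL, &appendblob.AppendBlockFromURLOptions{
		CopySourceAuthorization: to.Ptr("Bearer " + bearerToken), // OAuth token valid for the copy source
		SourceContentValidation: blob.SourceContentValidationTypeCRC64(srcCRC64),
	})
	return err
}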
+ SourceContentValidation blob.SourceContentValidationType AppendPositionAccessConditions *AppendPositionAccessConditions - CpkInfo *blob.CpkInfo + CPKInfo *blob.CPKInfo - CpkScopeInfo *blob.CpkScopeInfo + CPKScopeInfo *blob.CPKScopeInfo SourceModifiedAccessConditions *blob.SourceModifiedAccessConditions @@ -126,8 +120,8 @@ type AppendBlockFromURLOptions struct { Range blob.HTTPRange } -func (o *AppendBlockFromURLOptions) format() (*generated.AppendBlobClientAppendBlockFromURLOptions, *generated.CpkInfo, - *generated.CpkScopeInfo, *generated.LeaseAccessConditions, *generated.AppendPositionAccessConditions, +func (o *AppendBlockFromURLOptions) format() (*generated.AppendBlobClientAppendBlockFromURLOptions, *generated.CPKInfo, + *generated.CPKScopeInfo, *generated.LeaseAccessConditions, *generated.AppendPositionAccessConditions, *generated.ModifiedAccessConditions, *generated.SourceModifiedAccessConditions) { if o == nil { return nil, nil, nil, nil, nil, nil, nil @@ -135,13 +129,15 @@ func (o *AppendBlockFromURLOptions) format() (*generated.AppendBlobClientAppendB options := &generated.AppendBlobClientAppendBlockFromURLOptions{ SourceRange: exported.FormatHTTPRange(o.Range), - SourceContentMD5: o.SourceContentMD5, - SourceContentcrc64: o.SourceContentCRC64, - TransactionalContentMD5: o.TransactionalContentMD5, + CopySourceAuthorization: o.CopySourceAuthorization, + } + + if o.SourceContentValidation != nil { + o.SourceContentValidation.Apply(options) } leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) - return options, o.CpkInfo, o.CpkScopeInfo, leaseAccessConditions, o.AppendPositionAccessConditions, modifiedAccessConditions, o.SourceModifiedAccessConditions + return options, o.CPKInfo, o.CPKScopeInfo, leaseAccessConditions, o.AppendPositionAccessConditions, modifiedAccessConditions, o.SourceModifiedAccessConditions } // --------------------------------------------------------------------------------------------------------------------- @@ -164,3 +160,21 @@ func (o *SealOptions) format() (*generated.LeaseAccessConditions, } // --------------------------------------------------------------------------------------------------------------------- + +// ExpiryType defines values for ExpiryType +type ExpiryType = exported.ExpiryType + +// ExpiryTypeAbsolute defines the absolute time for the blob expiry +type ExpiryTypeAbsolute = exported.ExpiryTypeAbsolute + +// ExpiryTypeRelativeToNow defines the duration relative to now for the blob expiry +type ExpiryTypeRelativeToNow = exported.ExpiryTypeRelativeToNow + +// ExpiryTypeRelativeToCreation defines the duration relative to creation for the blob expiry +type ExpiryTypeRelativeToCreation = exported.ExpiryTypeRelativeToCreation + +// ExpiryTypeNever defines that the blob will be set to never expire +type ExpiryTypeNever = exported.ExpiryTypeNever + +// SetExpiryOptions contains the optional parameters for the Client.SetExpiry method. 
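With the ExpiryType aliases re-exported from the appendblob package, SetExpiry can be called without reaching into the internal exported package. A minimal sketch; note the operation only succeeds on Hierarchical Namespace enabled accounts:

// Assumed imports: "context", "time",
//   "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob"
func expireInEightHours(ctx context.Context, ab *appendblob.Client) error {
	// ExpiryTypeRelativeToNow is duration-based; ExpiryTypeAbsolute takes a
	// time.Time and ExpiryTypeNever clears the expiry.
	_, err := ab.SetExpiry(ctx, appendblob.ExpiryTypeRelativeToNow(8*time.Hour), nil)
	return err
}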
+type SetExpiryOptions = exported.SetExpiryOptions diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/responses.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/responses.go index e9ab4a3ccdcc7..e6851237c9171 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/responses.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/responses.go @@ -21,3 +21,6 @@ type AppendBlockFromURLResponse = generated.AppendBlobClientAppendBlockFromURLRe // SealResponse contains the response from method Client.Seal. type SealResponse = generated.AppendBlobClientSealResponse + +// SetExpiryResponse contains the response from method Client.SetExpiry. +type SetExpiryResponse = generated.BlobClientSetExpiryResponse diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/assets.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/assets.json new file mode 100644 index 0000000000000..df7d66f021081 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/assets.json @@ -0,0 +1,6 @@ +{ + "AssetsRepo": "Azure/azure-sdk-assets", + "AssetsRepoPrefixPath": "go", + "TagPrefix": "go/storage/azblob", + "Tag": "go/storage/azblob_9f40a5a13d" +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go index 1b7cf6d865afd..4175f3312cafc 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go @@ -8,15 +8,17 @@ package blob import ( "context" - "errors" "io" + "math" "os" "sync" "time" "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" @@ -25,9 +27,7 @@ import ( ) // ClientOptions contains the optional parameters when creating a Client. -type ClientOptions struct { - azcore.ClientOptions -} +type ClientOptions base.ClientOptions // Client represents a URL to an Azure Storage blob; the blob may be a block blob, append blob, or page blob. 
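ClientOptions is now defined in terms of the internal base.ClientOptions, but pipeline settings still flow through the embedded azcore.ClientOptions. A sketch of tuning retries, assuming the embedded field keeps the name ClientOptions (the base struct is not shown in this hunk):

// Assumed imports:
//   "github.com/Azure/azure-sdk-for-go/sdk/azcore"
//   "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
//   "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
func pipelineOptions() *blob.ClientOptions {
	return &blob.ClientOptions{
		ClientOptions: azcore.ClientOptions{ // embedded azcore options (assumed field name)
			Retry: policy.RetryOptions{MaxRetries: 5}, // per-request retry budget
		},
	}
}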
type Client base.Client[generated.BlobClient] @@ -37,12 +37,16 @@ type Client base.Client[generated.BlobClient] // - cred - an Azure AD credential, typically obtained via the azidentity module // - options - client options; pass nil to accept the default values func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { - authPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil) + audience := base.GetAudience((*base.ClientOptions)(options)) + authPolicy := shared.NewStorageChallengePolicy(cred, audience) conOptions := shared.GetClientOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - return (*Client)(base.NewBlobClient(blobURL, pl, nil)), nil + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + return (*Client)(base.NewBlobClient(blobURL, azClient, &cred, (*base.ClientOptions)(conOptions))), nil } // NewClientWithNoCredential creates an instance of Client with the specified values. @@ -51,9 +55,12 @@ func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptio // - options - client options; pass nil to accept the default values func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) { conOptions := shared.GetClientOptions(options) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) - return (*Client)(base.NewBlobClient(blobURL, pl, nil)), nil + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + return (*Client)(base.NewBlobClient(blobURL, azClient, nil, (*base.ClientOptions)(conOptions))), nil } // NewClientWithSharedKeyCredential creates an instance of Client with the specified values. @@ -63,10 +70,13 @@ func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, func NewClientWithSharedKeyCredential(blobURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) { authPolicy := exported.NewSharedKeyCredPolicy(cred) conOptions := shared.GetClientOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - return (*Client)(base.NewBlobClient(blobURL, pl, cred)), nil + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + return (*Client)(base.NewBlobClient(blobURL, azClient, cred, (*base.ClientOptions)(conOptions))), nil } // NewClientFromConnectionString creates an instance of Client with the specified values. 
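Token-credential clients are now built on azcore.NewClient with the storage challenge policy, but construction from the caller's side is unchanged. A minimal sketch, where the blob URL is a placeholder:

// Assumed imports:
//   "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
//   "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
func newBlobClient() (*blob.Client, error) {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		return nil, err
	}
	// placeholder account/container/blob URL
	return blob.NewClient("https://myaccount.blob.core.windows.net/mycontainer/myblob.txt", cred, nil)
}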
@@ -100,6 +110,14 @@ func (b *Client) sharedKey() *SharedKeyCredential { return base.SharedKey((*base.Client[generated.BlobClient])(b)) } +func (b *Client) credential() any { + return base.Credential((*base.Client[generated.BlobClient])(b)) +} + +func (b *Client) getClientOptions() *base.ClientOptions { + return base.GetClientOptions((*base.Client[generated.BlobClient])(b)) +} + // URL returns the URL endpoint used by the Client object. func (b *Client) URL() string { return b.generated().Endpoint() @@ -114,7 +132,7 @@ func (b *Client) WithSnapshot(snapshot string) (*Client, error) { } p.Snapshot = snapshot - return (*Client)(base.NewBlobClient(p.String(), b.generated().Pipeline(), b.sharedKey())), nil + return (*Client)(base.NewBlobClient(p.String(), b.generated().InternalClient(), b.credential(), b.getClientOptions())), nil } // WithVersionID creates a new AppendBlobURL object identical to the source but with the specified version id. @@ -126,7 +144,7 @@ func (b *Client) WithVersionID(versionID string) (*Client, error) { } p.VersionID = versionID - return (*Client)(base.NewBlobClient(p.String(), b.generated().Pipeline(), b.sharedKey())), nil + return (*Client)(base.NewBlobClient(p.String(), b.generated().InternalClient(), b.credential(), b.getClientOptions())), nil } // Delete marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection. @@ -148,10 +166,10 @@ func (b *Client) Undelete(ctx context.Context, o *UndeleteOptions) (UndeleteResp // SetTier operation sets the tier on a blob. The operation is allowed on a page // blob in a premium storage account and on a block blob in a blob storage account (locally -// redundant storage only). A premium page blob's tier determines the allowed size, IOPS, and +// redundant storage only). A premium page blob's tier determines the allowed size, IOPs, and // bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation // does not update the blob's ETag. -// For detailed information about block blob level tiering see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers. +// For detailed information about block blob level tiers see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers. func (b *Client) SetTier(ctx context.Context, tier AccessTier, o *SetTierOptions) (SetTierResponse, error) { opts, leaseAccessConditions, modifiedAccessConditions := o.format() resp, err := b.generated().SetTier(ctx, tier, opts, leaseAccessConditions, modifiedAccessConditions) @@ -176,7 +194,7 @@ func (b *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders HTTPHeaders, o // SetMetadata changes a blob's metadata. // https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata. -func (b *Client) SetMetadata(ctx context.Context, metadata map[string]string, o *SetMetadataOptions) (SetMetadataResponse, error) { +func (b *Client) SetMetadata(ctx context.Context, metadata map[string]*string, o *SetMetadataOptions) (SetMetadataResponse, error) { basics := generated.BlobClientSetMetadataOptions{Metadata: metadata} leaseAccessConditions, cpkInfo, cpkScope, modifiedAccessConditions := o.format() resp, err := b.generated().SetMetadata(ctx, &basics, leaseAccessConditions, cpkInfo, cpkScope, modifiedAccessConditions) @@ -231,19 +249,52 @@ func (b *Client) GetTags(ctx context.Context, options *GetTagsOptions) (GetTagsR } +// SetImmutabilityPolicy operation enables users to set the immutability policy on a blob. Mode defaults to "Unlocked". 
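Metadata values are now *string throughout the surface area; azcore's to.Ptr keeps call sites short. A minimal sketch with illustrative keys:

// Assumed imports: "context",
//   "github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
//   "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
func setOwnerMetadata(ctx context.Context, c *blob.Client) error {
	_, err := c.SetMetadata(ctx, map[string]*string{
		"owner": to.Ptr("loki"),
		"env":   to.Ptr("dev"),
	}, nil)
	return err
}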
+// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview +func (b *Client) SetImmutabilityPolicy(ctx context.Context, expiryTime time.Time, options *SetImmutabilityPolicyOptions) (SetImmutabilityPolicyResponse, error) { + blobSetImmutabilityPolicyOptions, modifiedAccessConditions := options.format() + blobSetImmutabilityPolicyOptions.ImmutabilityPolicyExpiry = &expiryTime + resp, err := b.generated().SetImmutabilityPolicy(ctx, blobSetImmutabilityPolicyOptions, modifiedAccessConditions) + return resp, err +} + +// DeleteImmutabilityPolicy operation enables users to delete the immutability policy on a blob. +// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview +func (b *Client) DeleteImmutabilityPolicy(ctx context.Context, options *DeleteImmutabilityPolicyOptions) (DeleteImmutabilityPolicyResponse, error) { + deleteImmutabilityOptions := options.format() + resp, err := b.generated().DeleteImmutabilityPolicy(ctx, deleteImmutabilityOptions) + return resp, err +} + +// SetLegalHold operation enables users to set legal hold on a blob. +// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview +func (b *Client) SetLegalHold(ctx context.Context, legalHold bool, options *SetLegalHoldOptions) (SetLegalHoldResponse, error) { + setLegalHoldOptions := options.format() + resp, err := b.generated().SetLegalHold(ctx, legalHold, setLegalHoldOptions) + return resp, err +} + // CopyFromURL synchronously copies the data at the source URL to a block blob, with sizes up to 256 MB. // For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url. func (b *Client) CopyFromURL(ctx context.Context, copySource string, options *CopyFromURLOptions) (CopyFromURLResponse, error) { - copyOptions, smac, mac, lac := options.format() - resp, err := b.generated().CopyFromURL(ctx, copySource, copyOptions, smac, mac, lac) + copyOptions, smac, mac, lac, cpkScopeInfo := options.format() + resp, err := b.generated().CopyFromURL(ctx, copySource, copyOptions, smac, mac, lac, cpkScopeInfo) + return resp, err +} + +// GetAccountInfo provides account level information +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/get-account-information?tabs=shared-access-signatures. +func (b *Client) GetAccountInfo(ctx context.Context, o *GetAccountInfoOptions) (GetAccountInfoResponse, error) { + getAccountInfoOptions := o.format() + resp, err := b.generated().GetAccountInfo(ctx, getAccountInfoOptions) return resp, err } // GetSASURL is a convenience method for generating a SAS token for the currently pointed at blob. // It can only be used if the credential supplied during creation was a SharedKeyCredential. -func (b *Client) GetSASURL(permissions sas.BlobPermissions, start time.Time, expiry time.Time) (string, error) { +func (b *Client) GetSASURL(permissions sas.BlobPermissions, expiry time.Time, o *GetSASURLOptions) (string, error) { if b.sharedKey() == nil { - return "", errors.New("credential is not a SharedKeyCredential. 
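A minimal sketch combining the new immutability helpers; the 24h expiry is arbitrary, nil options leave the policy mode at its Unlocked default, and the target container is assumed to have version-level immutability enabled:

// Assumed imports: "context", "time",
//   "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
func protectBlob(ctx context.Context, c *blob.Client) error {
	// nil options: the policy mode defaults to Unlocked
	if _, err := c.SetImmutabilityPolicy(ctx, time.Now().Add(24*time.Hour), nil); err != nil {
		return err
	}
	_, err := c.SetLegalHold(ctx, true, nil)
	return err
}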
SAS can only be signed with a SharedKeyCredential") + return "", bloberror.MissingSharedKeyCredential } urlParts, err := ParseURL(b.URL()) @@ -256,17 +307,16 @@ func (b *Client) GetSASURL(permissions sas.BlobPermissions, start time.Time, exp if err != nil { t = time.Time{} } + st := o.format() qps, err := sas.BlobSignatureValues{ ContainerName: urlParts.ContainerName, BlobName: urlParts.BlobName, SnapshotTime: t, Version: sas.Version, - - Permissions: permissions.String(), - - StartTime: start.UTC(), - ExpiryTime: expiry.UTC(), + Permissions: permissions.String(), + StartTime: st, + ExpiryTime: expiry.UTC(), }.SignWithSharedKey(b.sharedKey()) if err != nil { @@ -280,8 +330,8 @@ func (b *Client) GetSASURL(permissions sas.BlobPermissions, start time.Time, exp // Concurrent Download Functions ----------------------------------------------------------------------------------------- -// download downloads an Azure blob to a WriterAt in parallel. -func (b *Client) download(ctx context.Context, writer io.WriterAt, o downloadOptions) (int64, error) { +// downloadBuffer downloads an Azure blob to a WriterAt in parallel. +func (b *Client) downloadBuffer(ctx context.Context, writer io.WriterAt, o downloadOptions) (int64, error) { if o.BlockSize == 0 { o.BlockSize = DefaultDownloadBlockSize } @@ -289,12 +339,11 @@ func (b *Client) download(ctx context.Context, writer io.WriterAt, o downloadOpt count := o.Range.Count if count == CountToEnd { // If size not specified, calculate it // If we don't have the length at all, get it - downloadBlobOptions := o.getDownloadBlobOptions(HTTPRange{}, nil) - dr, err := b.DownloadStream(ctx, downloadBlobOptions) + gr, err := b.GetProperties(ctx, o.getBlobPropertiesOptions()) if err != nil { return 0, err } - count = *dr.ContentLength - o.Range.Offset + count = *gr.ContentLength - o.Range.Offset } if count <= 0 { @@ -310,9 +359,9 @@ func (b *Client) download(ctx context.Context, writer io.WriterAt, o downloadOpt OperationName: "downloadBlobToWriterAt", TransferSize: count, ChunkSize: o.BlockSize, + NumChunks: uint16(((count - 1) / o.BlockSize) + 1), Concurrency: o.Concurrency, - Operation: func(chunkStart int64, count int64, ctx context.Context) error { - + Operation: func(ctx context.Context, chunkStart int64, count int64) error { downloadBlobOptions := o.getDownloadBlobOptions(HTTPRange{ Offset: chunkStart + o.Range.Offset, Count: count, @@ -349,6 +398,165 @@ func (b *Client) download(ctx context.Context, writer io.WriterAt, o downloadOpt return count, nil } +// downloadFile downloads an Azure blob to a Writer. The blocks are downloaded parallely, +// but written to file serially +func (b *Client) downloadFile(ctx context.Context, writer io.Writer, o downloadOptions) (int64, error) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if o.BlockSize == 0 { + o.BlockSize = DefaultDownloadBlockSize + } + + if o.Concurrency == 0 { + o.Concurrency = DefaultConcurrency + } + + count := o.Range.Count + if count == CountToEnd { //Calculate size if not specified + gr, err := b.GetProperties(ctx, o.getBlobPropertiesOptions()) + if err != nil { + return 0, err + } + count = *gr.ContentLength - o.Range.Offset + } + + if count <= 0 { + // The file is empty, there is nothing to download. 
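GetSASURL now takes the expiry plus an options struct and fails with bloberror.MissingSharedKeyCredential when the client was not built from a shared key. A minimal sketch issuing a one-hour read-only SAS:

// Assumed imports: "time",
//   "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
//   "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
func readOnlySAS(c *blob.Client) (string, error) {
	// c must have been created with NewClientWithSharedKeyCredential
	return c.GetSASURL(sas.BlobPermissions{Read: true}, time.Now().Add(time.Hour), nil)
}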
+ return 0, nil + } + + progress := int64(0) + progressLock := &sync.Mutex{} + + // helper routine to get body + getBodyForRange := func(ctx context.Context, chunkStart, size int64) (io.ReadCloser, error) { + downloadBlobOptions := o.getDownloadBlobOptions(HTTPRange{ + Offset: chunkStart + o.Range.Offset, + Count: size, + }, nil) + dr, err := b.DownloadStream(ctx, downloadBlobOptions) + if err != nil { + return nil, err + } + + var body io.ReadCloser = dr.NewRetryReader(ctx, &o.RetryReaderOptionsPerBlock) + if o.Progress != nil { + rangeProgress := int64(0) + body = streaming.NewResponseProgress( + body, + func(bytesTransferred int64) { + diff := bytesTransferred - rangeProgress + rangeProgress = bytesTransferred + progressLock.Lock() + progress += diff + o.Progress(progress) + progressLock.Unlock() + }) + } + + return body, nil + } + + // if file fits in a single buffer, we'll download here. + if count <= o.BlockSize { + body, err := getBodyForRange(ctx, int64(0), count) + if err != nil { + return 0, err + } + defer body.Close() + + return io.Copy(writer, body) + } + + buffers := shared.NewMMBPool(int(o.Concurrency), o.BlockSize) + defer buffers.Free() + + numChunks := uint16((count-1)/o.BlockSize + 1) + for bufferCounter := float64(0); bufferCounter < math.Min(float64(numChunks), float64(o.Concurrency)); bufferCounter++ { + if _, err := buffers.Grow(); err != nil { + return 0, err + } + } + + acquireBuffer := func() ([]byte, error) { + return <-buffers.Acquire(), nil + } + + blocks := make([]chan []byte, numChunks) + for b := range blocks { + blocks[b] = make(chan []byte) + } + + /* + * We have created as many channels as the number of chunks we have. + * Each downloaded block will be sent to the channel matching its + * sequence number, i.e. 0th block is sent to 0th channel, 1st block + * to 1st channel and likewise. The blocks are then read and written + * to the file serially by below goroutine. Do note that the blocks + * are still downloaded parallelly from n/w, only serialized + * and written to file here. + */ + writerError := make(chan error) + writeSize := int64(0) + go func(ch chan error) { + for _, block := range blocks { + select { + case <-ctx.Done(): + return + case block := <-block: + n, err := writer.Write(block) + writeSize += int64(n) + buffers.Release(block[:cap(block)]) + if err != nil { + ch <- err + return + } + } + } + ch <- nil + }(writerError) + + // Prepare and do parallel download. + err := shared.DoBatchTransfer(ctx, &shared.BatchTransferOptions{ + OperationName: "downloadBlobToWriterAt", + TransferSize: count, + ChunkSize: o.BlockSize, + NumChunks: numChunks, + Concurrency: o.Concurrency, + Operation: func(ctx context.Context, chunkStart int64, count int64) error { + buff, err := acquireBuffer() + if err != nil { + return err + } + + body, err := getBodyForRange(ctx, chunkStart, count) + if err != nil { + buffers.Release(buff) + return nil + } + + _, err = io.ReadFull(body, buff[:count]) + body.Close() + if err != nil { + return err + } + + blockIndex := chunkStart / o.BlockSize + blocks[blockIndex] <- buff[:count] + return nil + }, + }) + + if err != nil { + return 0, err + } + // error from writer thread. + if err = <-writerError; err != nil { + return 0, err + } + return writeSize, nil +} + // DownloadStream reads a range of bytes from a blob. The response also includes the blob's properties and metadata. // For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob. 
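downloadFile fetches block ranges in parallel but writes them to the file serially; callers only see DownloadFile. A minimal sketch with an illustrative path and tuning values:

// Assumed imports: "context", "os",
//   "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
func downloadToDisk(ctx context.Context, c *blob.Client, path string) (int64, error) {
	f, err := os.Create(path)
	if err != nil {
		return 0, err
	}
	defer f.Close()
	return c.DownloadFile(ctx, f, &blob.DownloadFileOptions{
		BlockSize:   4 * 1024 * 1024, // 4 MiB range per request
		Concurrency: 8,               // parallel range GETs
	})
}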
func (b *Client) DownloadStream(ctx context.Context, o *DownloadStreamOptions) (DownloadStreamResponse, error) { @@ -363,12 +571,12 @@ func (b *Client) DownloadStream(ctx context.Context, o *DownloadStreamOptions) ( } return DownloadStreamResponse{ - client: b, - BlobClientDownloadResponse: dr, - getInfo: httpGetterInfo{Range: o.Range, ETag: dr.ETag}, - ObjectReplicationRules: deserializeORSPolicies(dr.ObjectReplicationRules), - cpkInfo: o.CpkInfo, - cpkScope: o.CpkScopeInfo, + client: b, + DownloadResponse: dr, + getInfo: httpGetterInfo{Range: o.Range, ETag: dr.ETag}, + ObjectReplicationRules: deserializeORSPolicies(dr.ObjectReplicationRules), + cpkInfo: o.CPKInfo, + cpkScope: o.CPKScopeInfo, }, err } @@ -377,7 +585,7 @@ func (b *Client) DownloadBuffer(ctx context.Context, buffer []byte, o *DownloadB if o == nil { o = &DownloadBufferOptions{} } - return b.download(ctx, shared.NewBytesWriter(buffer), (downloadOptions)(*o)) + return b.downloadBuffer(ctx, shared.NewBytesWriter(buffer), (downloadOptions)(*o)) } // DownloadFile downloads an Azure blob to a local file. @@ -388,6 +596,11 @@ func (b *Client) DownloadFile(ctx context.Context, file *os.File, o *DownloadFil } do := (*downloadOptions)(o) + filePointer, err := file.Seek(0, io.SeekCurrent) + if err != nil { + return 0, err + } + // 1. Calculate the size of the destination file var size int64 @@ -416,7 +629,15 @@ func (b *Client) DownloadFile(ctx context.Context, file *os.File, o *DownloadFil } if size > 0 { - return b.download(ctx, file, *do) + writeSize, err := b.downloadFile(ctx, file, *do) + if err != nil { + return 0, err + } + _, err = file.Seek(filePointer, io.SeekStart) + if err != nil { + return 0, err + } + return writeSize, nil } else { // if the blob's size is 0, there is no need in downloading it return 0, nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/constants.go index d0e5d7d825cce..daef800ed0bd9 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/constants.go @@ -9,6 +9,7 @@ package blob import ( "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" ) const ( @@ -18,6 +19,9 @@ const ( // DefaultDownloadBlockSize is default block size DefaultDownloadBlockSize = int64(4 * 1024 * 1024) // 4MB + + // DefaultConcurrency is the default number of blocks downloaded or uploaded in parallel + DefaultConcurrency = shared.DefaultConcurrency ) // BlobType defines values for BlobType @@ -47,12 +51,13 @@ func PossibleDeleteSnapshotsOptionTypeValues() []DeleteSnapshotsOptionType { return generated.PossibleDeleteSnapshotsOptionTypeValues() } -// AccessTier defines values for Blob Access Tier +// AccessTier defines values for Blob Access Tier. 
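When no Count is given, the download path now sizes the transfer from GetProperties rather than an initial ranged GET. A minimal sketch that allocates the destination buffer the same way:

// Assumed imports: "context",
//   "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
func downloadToMemory(ctx context.Context, c *blob.Client) ([]byte, error) {
	props, err := c.GetProperties(ctx, nil)
	if err != nil {
		return nil, err
	}
	buf := make([]byte, *props.ContentLength)
	_, err = c.DownloadBuffer(ctx, buf, nil)
	return buf, err
}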
type AccessTier = generated.AccessTier const ( AccessTierArchive AccessTier = generated.AccessTierArchive AccessTierCool AccessTier = generated.AccessTierCool + AccessTierCold AccessTier = generated.AccessTierCold AccessTierHot AccessTier = generated.AccessTierHot AccessTierP10 AccessTier = generated.AccessTierP10 AccessTierP15 AccessTier = generated.AccessTierP15 @@ -129,7 +134,7 @@ func PossibleCopyStatusTypeValues() []CopyStatusType { return generated.PossibleCopyStatusTypeValues() } -// EncryptionAlgorithmType defines values for EncryptionAlgorithmType +// EncryptionAlgorithmType defines values for EncryptionAlgorithmType. type EncryptionAlgorithmType = generated.EncryptionAlgorithmType const ( @@ -142,12 +147,13 @@ func PossibleEncryptionAlgorithmTypeValues() []EncryptionAlgorithmType { return generated.PossibleEncryptionAlgorithmTypeValues() } -// ArchiveStatus defines values for ArchiveStatus +// ArchiveStatus defines values for ArchiveStatus. type ArchiveStatus = generated.ArchiveStatus const ( ArchiveStatusRehydratePendingToCool ArchiveStatus = generated.ArchiveStatusRehydratePendingToCool ArchiveStatusRehydratePendingToHot ArchiveStatus = generated.ArchiveStatusRehydratePendingToHot + ArchiveStatusRehydratePendingToCold ArchiveStatus = generated.ArchiveStatusRehydratePendingToCold ) // PossibleArchiveStatusValues returns the possible values for the ArchiveStatus const type. @@ -155,7 +161,7 @@ func PossibleArchiveStatusValues() []ArchiveStatus { return generated.PossibleArchiveStatusValues() } -// DeleteType defines values for DeleteType +// DeleteType defines values for DeleteType. type DeleteType = generated.DeleteType const ( @@ -168,21 +174,6 @@ func PossibleDeleteTypeValues() []DeleteType { return generated.PossibleDeleteTypeValues() } -// ExpiryOptions defines values for ExpiryOptions -type ExpiryOptions = generated.ExpiryOptions - -const ( - ExpiryOptionsAbsolute ExpiryOptions = generated.ExpiryOptionsAbsolute - ExpiryOptionsNeverExpire ExpiryOptions = generated.ExpiryOptionsNeverExpire - ExpiryOptionsRelativeToCreation ExpiryOptions = generated.ExpiryOptionsRelativeToCreation - ExpiryOptionsRelativeToNow ExpiryOptions = generated.ExpiryOptionsRelativeToNow -) - -// PossibleExpiryOptionsValues returns the possible values for the ExpiryOptions const type. -func PossibleExpiryOptionsValues() []ExpiryOptions { - return generated.PossibleExpiryOptionsValues() -} - // QueryFormatType - The quick query format type. type QueryFormatType = generated.QueryFormatType @@ -198,44 +189,47 @@ func PossibleQueryFormatTypeValues() []QueryFormatType { return generated.PossibleQueryFormatTypeValues() } -// LeaseDurationType defines values for LeaseDurationType -type LeaseDurationType = generated.LeaseDurationType +// TransferValidationType abstracts the various mechanisms used to verify a transfer. +type TransferValidationType = exported.TransferValidationType -const ( - LeaseDurationTypeInfinite LeaseDurationType = generated.LeaseDurationTypeInfinite - LeaseDurationTypeFixed LeaseDurationType = generated.LeaseDurationTypeFixed -) +// TransferValidationTypeCRC64 is a TransferValidationType used to provide a precomputed CRC64. +type TransferValidationTypeCRC64 = exported.TransferValidationTypeCRC64 -// PossibleLeaseDurationTypeValues returns the possible values for the LeaseDurationType const type. 
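The constants pick up the new Cold tier (and the matching rehydrate-pending status). A minimal sketch of re-tiering a blob:

// Assumed imports: "context",
//   "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
func moveToCold(ctx context.Context, c *blob.Client) error {
	_, err := c.SetTier(ctx, blob.AccessTierCold, nil)
	return err
}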
-func PossibleLeaseDurationTypeValues() []LeaseDurationType { - return generated.PossibleLeaseDurationTypeValues() +// TransferValidationTypeComputeCRC64 is a TransferValidationType that indicates a CRC64 should be computed during transfer. +func TransferValidationTypeComputeCRC64() TransferValidationType { + return exported.TransferValidationTypeComputeCRC64() } -// LeaseStateType defines values for LeaseStateType -type LeaseStateType = generated.LeaseStateType +// TransferValidationTypeMD5 is a TransferValidationType used to provide a precomputed MD5. +type TransferValidationTypeMD5 = exported.TransferValidationTypeMD5 -const ( - LeaseStateTypeAvailable LeaseStateType = generated.LeaseStateTypeAvailable - LeaseStateTypeLeased LeaseStateType = generated.LeaseStateTypeLeased - LeaseStateTypeExpired LeaseStateType = generated.LeaseStateTypeExpired - LeaseStateTypeBreaking LeaseStateType = generated.LeaseStateTypeBreaking - LeaseStateTypeBroken LeaseStateType = generated.LeaseStateTypeBroken -) +// SourceContentValidationType abstracts the various mechanisms used to validate source content. +// This interface is not publicly implementable. +type SourceContentValidationType interface { + Apply(generated.SourceContentSetter) + notPubliclyImplementable() +} -// PossibleLeaseStateTypeValues returns the possible values for the LeaseStateType const type. -func PossibleLeaseStateTypeValues() []LeaseStateType { - return generated.PossibleLeaseStateTypeValues() +// SourceContentValidationTypeCRC64 is a SourceContentValidationType used to provide a precomputed CRC64. +type SourceContentValidationTypeCRC64 []byte + +// Apply implements the SourceContentValidationType interface for type SourceContentValidationTypeCRC64. +func (s SourceContentValidationTypeCRC64) Apply(src generated.SourceContentSetter) { + src.SetSourceContentCRC64(s) } -// LeaseStatusType defines values for LeaseStatusType -type LeaseStatusType = generated.LeaseStatusType +func (SourceContentValidationTypeCRC64) notPubliclyImplementable() {} -const ( - LeaseStatusTypeLocked LeaseStatusType = generated.LeaseStatusTypeLocked - LeaseStatusTypeUnlocked LeaseStatusType = generated.LeaseStatusTypeUnlocked -) +var _ SourceContentValidationType = (SourceContentValidationTypeCRC64)(nil) + +// SourceContentValidationTypeMD5 is a SourceContentValidationType used to provide a precomputed MD5. +type SourceContentValidationTypeMD5 []byte -// PossibleLeaseStatusTypeValues returns the possible values for the LeaseStatusType const type. -func PossibleLeaseStatusTypeValues() []LeaseStatusType { - return generated.PossibleLeaseStatusTypeValues() +// Apply implements the SourceContentValidationType interface for type SourceContentValidationTypeMD5. +func (s SourceContentValidationTypeMD5) Apply(src generated.SourceContentSetter) { + src.SetSourceContentMD5(s) } + +func (SourceContentValidationTypeMD5) notPubliclyImplementable() {} + +var _ SourceContentValidationType = (SourceContentValidationTypeMD5)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/models.go index 86af620c35748..d7334688946f5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/models.go @@ -34,11 +34,11 @@ type LeaseAccessConditions = exported.LeaseAccessConditions // ModifiedAccessConditions contains a group of parameters for specifying access conditions. 
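Both a precomputed MD5 and an SDK-computed CRC64 satisfy the TransferValidationType interface. A small sketch constructing each value; the payload is illustrative, and the CRC64 is left to the SDK because the service uses its own polynomial:

// Assumed imports: "crypto/md5",
//   "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
func validationValues(payload []byte) (md5TV, crcTV blob.TransferValidationType) {
	sum := md5.Sum(payload)
	md5TV = blob.TransferValidationTypeMD5(sum[:])    // caller-computed MD5
	crcTV = blob.TransferValidationTypeComputeCRC64() // SDK computes CRC64 in flight
	return
}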
type ModifiedAccessConditions = exported.ModifiedAccessConditions -// CpkInfo contains a group of parameters for client provided encryption key. -type CpkInfo = generated.CpkInfo +// CPKInfo contains a group of parameters for client provided encryption key. +type CPKInfo = generated.CPKInfo -// CpkScopeInfo contains a group of parameters for client provided encryption scope. -type CpkScopeInfo = generated.CpkScopeInfo +// CPKScopeInfo contains a group of parameters for client provided encryption scope. +type CPKScopeInfo = generated.CPKScopeInfo // HTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. type HTTPHeaders = generated.BlobHTTPHeaders @@ -51,7 +51,7 @@ type Tags = generated.BlobTag // HTTPRange defines a range of bytes within an HTTP resource, starting at offset and // ending at offset+count. A zero-value HTTPRange indicates the entire resource. An HTTPRange -// which has an offset but no zero value count indicates from the offset to the resource's end. +// which has an offset and zero value count indicates from the offset to the resource's end. type HTTPRange = exported.HTTPRange // Request Model Declaration ------------------------------------------------------------------------------------------- @@ -66,11 +66,11 @@ type DownloadStreamOptions struct { Range HTTPRange AccessConditions *AccessConditions - CpkInfo *CpkInfo - CpkScopeInfo *CpkScopeInfo + CPKInfo *CPKInfo + CPKScopeInfo *CPKScopeInfo } -func (o *DownloadStreamOptions) format() (*generated.BlobClientDownloadOptions, *generated.LeaseAccessConditions, *generated.CpkInfo, *generated.ModifiedAccessConditions) { +func (o *DownloadStreamOptions) format() (*generated.BlobClientDownloadOptions, *generated.LeaseAccessConditions, *generated.CPKInfo, *generated.ModifiedAccessConditions) { if o == nil { return nil, nil, nil, nil } @@ -81,7 +81,7 @@ func (o *DownloadStreamOptions) format() (*generated.BlobClientDownloadOptions, } leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) - return &basics, leaseAccessConditions, o.CpkInfo, modifiedAccessConditions + return &basics, leaseAccessConditions, o.CPKInfo, modifiedAccessConditions } // --------------------------------------------------------------------------------------------------------------------- @@ -101,10 +101,10 @@ type downloadOptions struct { AccessConditions *AccessConditions // ClientProvidedKeyOptions indicates the client provided key by name and/or by value to encrypt/decrypt data. - CpkInfo *CpkInfo - CpkScopeInfo *CpkScopeInfo + CPKInfo *CPKInfo + CPKScopeInfo *CPKScopeInfo - // Concurrency indicates the maximum number of blocks to download in parallel (0=default) + // Concurrency indicates the maximum number of blocks to download in parallel (0=default). Concurrency uint16 // RetryReaderOptionsPerBlock is used when downloading each block. 
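Per the corrected HTTPRange comment, an offset with a zero Count reads to the end of the resource. A minimal sketch reading only the first KiB via DownloadStream:

// Assumed imports: "context", "io",
//   "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
func readFirstKiB(ctx context.Context, c *blob.Client) ([]byte, error) {
	resp, err := c.DownloadStream(ctx, &blob.DownloadStreamOptions{
		Range: blob.HTTPRange{Offset: 0, Count: 1024}, // Count 0 would mean "to the end"
	})
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	return io.ReadAll(resp.Body)
}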
@@ -117,7 +117,7 @@ func (o *downloadOptions) getBlobPropertiesOptions() *GetPropertiesOptions { } return &GetPropertiesOptions{ AccessConditions: o.AccessConditions, - CpkInfo: o.CpkInfo, + CPKInfo: o.CPKInfo, } } @@ -127,8 +127,8 @@ func (o *downloadOptions) getDownloadBlobOptions(rnge HTTPRange, rangeGetContent } return &DownloadStreamOptions{ AccessConditions: o.AccessConditions, - CpkInfo: o.CpkInfo, - CpkScopeInfo: o.CpkScopeInfo, + CPKInfo: o.CPKInfo, + CPKScopeInfo: o.CPKScopeInfo, Range: rnge, RangeGetContentMD5: rangeGetContentMD5, } @@ -148,13 +148,13 @@ type DownloadBufferOptions struct { // BlobAccessConditions indicates the access conditions used when making HTTP GET requests against the blob. AccessConditions *AccessConditions - // CpkInfo contains a group of parameters for client provided encryption key. - CpkInfo *CpkInfo + // CPKInfo contains a group of parameters for client provided encryption key. + CPKInfo *CPKInfo - // CpkScopeInfo contains a group of parameters for client provided encryption scope. - CpkScopeInfo *CpkScopeInfo + // CPKScopeInfo contains a group of parameters for client provided encryption scope. + CPKScopeInfo *CPKScopeInfo - // Concurrency indicates the maximum number of blocks to download in parallel (0=default) + // Concurrency indicates the maximum number of blocks to download in parallel (0=default). Concurrency uint16 // RetryReaderOptionsPerBlock is used when downloading each block. @@ -176,8 +176,8 @@ type DownloadFileOptions struct { AccessConditions *AccessConditions // ClientProvidedKeyOptions indicates the client provided key by name and/or by value to encrypt/decrypt data. - CpkInfo *CpkInfo - CpkScopeInfo *CpkScopeInfo + CPKInfo *CPKInfo + CPKScopeInfo *CPKScopeInfo // Concurrency indicates the maximum number of blocks to download in parallel. The default value is 5. Concurrency uint16 @@ -191,9 +191,14 @@ type DownloadFileOptions struct { // DeleteOptions contains the optional parameters for the Client.Delete method. type DeleteOptions struct { // Required if the blob has associated snapshots. Specify one of the following two options: include: Delete the base blob - // and all of its snapshots. only: Delete only the blob's snapshots and not the blob itself + // and all of its snapshots. only: Delete only the blob's snapshots and not the blob itself. DeleteSnapshots *DeleteSnapshotsOptionType AccessConditions *AccessConditions + // Setting DeleteType to DeleteTypePermanent will permanently delete soft-delete snapshot and/or version blobs. + // WARNING: This is a dangerous operation and should not be used unless you know the implications. Please proceed + // with caution. 
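DeleteOptions now carries BlobDeleteType alongside DeleteSnapshots. A minimal sketch; the permanent-delete line is commented out because it requires that feature to be enabled on the account:

// Assumed imports: "context",
//   "github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
//   "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
func deleteWithSnapshots(ctx context.Context, c *blob.Client) error {
	_, err := c.Delete(ctx, &blob.DeleteOptions{
		DeleteSnapshots: to.Ptr(blob.DeleteSnapshotsOptionTypeInclude),
		// BlobDeleteType: to.Ptr(blob.DeleteTypePermanent), // only with permanent delete enabled
	})
	return err
}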
+ // For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob + BlobDeleteType *DeleteType } func (o *DeleteOptions) format() (*generated.BlobClientDeleteOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { @@ -203,6 +208,7 @@ func (o *DeleteOptions) format() (*generated.BlobClientDeleteOptions, *generated basics := generated.BlobClientDeleteOptions{ DeleteSnapshots: o.DeleteSnapshots, + DeleteType: o.BlobDeleteType, // None by default } if o.AccessConditions == nil { @@ -247,17 +253,17 @@ func (o *SetTierOptions) format() (*generated.BlobClientSetTierOptions, *generat // GetPropertiesOptions contains the optional parameters for the Client.GetProperties method type GetPropertiesOptions struct { AccessConditions *AccessConditions - CpkInfo *CpkInfo + CPKInfo *CPKInfo } func (o *GetPropertiesOptions) format() (*generated.BlobClientGetPropertiesOptions, - *generated.LeaseAccessConditions, *generated.CpkInfo, *generated.ModifiedAccessConditions) { + *generated.LeaseAccessConditions, *generated.CPKInfo, *generated.ModifiedAccessConditions) { if o == nil { return nil, nil, nil, nil } leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) - return nil, leaseAccessConditions, o.CpkInfo, modifiedAccessConditions + return nil, leaseAccessConditions, o.CPKInfo, modifiedAccessConditions } // --------------------------------------------------------------------------------------------------------------------- @@ -281,32 +287,32 @@ func (o *SetHTTPHeadersOptions) format() (*generated.BlobClientSetHTTPHeadersOpt // SetMetadataOptions provides set of configurations for Set Metadata on blob operation type SetMetadataOptions struct { AccessConditions *AccessConditions - CpkInfo *CpkInfo - CpkScopeInfo *CpkScopeInfo + CPKInfo *CPKInfo + CPKScopeInfo *CPKScopeInfo } -func (o *SetMetadataOptions) format() (*generated.LeaseAccessConditions, *CpkInfo, - *CpkScopeInfo, *ModifiedAccessConditions) { +func (o *SetMetadataOptions) format() (*generated.LeaseAccessConditions, *CPKInfo, + *CPKScopeInfo, *ModifiedAccessConditions) { if o == nil { return nil, nil, nil, nil } leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) - return leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions + return leaseAccessConditions, o.CPKInfo, o.CPKScopeInfo, modifiedAccessConditions } // --------------------------------------------------------------------------------------------------------------------- // CreateSnapshotOptions contains the optional parameters for the Client.CreateSnapshot method. 
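CreateSnapshotOptions uses pointer-valued metadata as well. A minimal sketch that returns the snapshot ID, assuming the response exposes it as Snapshot (that field is not shown in this hunk):

// Assumed imports: "context",
//   "github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
//   "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
func snapshotWithMetadata(ctx context.Context, c *blob.Client) (string, error) {
	resp, err := c.CreateSnapshot(ctx, &blob.CreateSnapshotOptions{
		Metadata: map[string]*string{"reason": to.Ptr("pre-upgrade")},
	})
	if err != nil {
		return "", err
	}
	return *resp.Snapshot, nil // assumed response field
}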
type CreateSnapshotOptions struct { - Metadata map[string]string + Metadata map[string]*string AccessConditions *AccessConditions - CpkInfo *CpkInfo - CpkScopeInfo *CpkScopeInfo + CPKInfo *CPKInfo + CPKScopeInfo *CPKScopeInfo } -func (o *CreateSnapshotOptions) format() (*generated.BlobClientCreateSnapshotOptions, *generated.CpkInfo, - *generated.CpkScopeInfo, *generated.ModifiedAccessConditions, *generated.LeaseAccessConditions) { +func (o *CreateSnapshotOptions) format() (*generated.BlobClientCreateSnapshotOptions, *generated.CPKInfo, + *generated.CPKScopeInfo, *generated.ModifiedAccessConditions, *generated.LeaseAccessConditions) { if o == nil { return nil, nil, nil, nil, nil } @@ -315,7 +321,7 @@ func (o *CreateSnapshotOptions) format() (*generated.BlobClientCreateSnapshotOpt return &generated.BlobClientCreateSnapshotOptions{ Metadata: o.Metadata, - }, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions, leaseAccessConditions + }, o.CPKInfo, o.CPKScopeInfo, modifiedAccessConditions, leaseAccessConditions } // --------------------------------------------------------------------------------------------------------------------- @@ -335,7 +341,7 @@ type StartCopyFromURLOptions struct { // are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source // blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. // See Naming and Referencing Containers, Blobs, and Metadata for more information. - Metadata map[string]string + Metadata map[string]*string // Optional: Indicates the priority with which to rehydrate an archived blob. RehydratePriority *RehydratePriority // Overrides the sealed state of the destination blob. Service version 2019-12-12 and newer. @@ -442,6 +448,75 @@ func (o *GetTagsOptions) format() (*generated.BlobClientGetTagsOptions, *generat // --------------------------------------------------------------------------------------------------------------------- +// SetImmutabilityPolicyOptions contains the parameter for Client.SetImmutabilityPolicy +type SetImmutabilityPolicyOptions struct { + // Specifies the immutability policy mode to set on the blob. Possible values to set include: "Locked", "Unlocked". + // "Mutable" can only be returned by service, don't set to "Mutable". If mode is not set - it will default to Unlocked. + Mode *ImmutabilityPolicySetting + ModifiedAccessConditions *ModifiedAccessConditions +} + +func (o *SetImmutabilityPolicyOptions) format() (*generated.BlobClientSetImmutabilityPolicyOptions, *ModifiedAccessConditions) { + if o == nil { + return &generated.BlobClientSetImmutabilityPolicyOptions{}, nil + } + ac := &exported.BlobAccessConditions{ + ModifiedAccessConditions: o.ModifiedAccessConditions, + } + _, modifiedAccessConditions := exported.FormatBlobAccessConditions(ac) + + options := &generated.BlobClientSetImmutabilityPolicyOptions{ + ImmutabilityPolicyMode: o.Mode, + } + + return options, modifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// DeleteImmutabilityPolicyOptions contains the optional parameters for the Client.DeleteImmutabilityPolicy method. 
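StartCopyFromURLOptions switches to pointer-valued metadata too. A sketch of kicking off an async copy; the source URL is a placeholder and the Tier field is assumed to exist on the options struct (it is outside this hunk):

// Assumed imports: "context",
//   "github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
//   "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
func startAsyncCopy(ctx context.Context, c *blob.Client, srcURL string) error {
	_, err := c.StartCopyFromURL(ctx, srcURL, &blob.StartCopyFromURLOptions{
		Metadata: map[string]*string{"copied-from": to.Ptr(srcURL)},
		Tier:     to.Ptr(blob.AccessTierCool), // assumed options field
	})
	return err
}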
+type DeleteImmutabilityPolicyOptions struct { + // placeholder for future options +} + +func (o *DeleteImmutabilityPolicyOptions) format() *generated.BlobClientDeleteImmutabilityPolicyOptions { + return nil +} + +// --------------------------------------------------------------------------------------------------------------------- + +// SetLegalHoldOptions contains the optional parameters for the Client.SetLegalHold method. +type SetLegalHoldOptions struct { + // placeholder for future options +} + +func (o *SetLegalHoldOptions) format() *generated.BlobClientSetLegalHoldOptions { + return nil +} + +// --------------------------------------------------------------------------------------------------------------------- + +// GetSASURLOptions contains the optional parameters for the Client.GetSASURL method. +type GetSASURLOptions struct { + StartTime *time.Time +} + +func (o *GetSASURLOptions) format() time.Time { + if o == nil { + return time.Time{} + } + + var st time.Time + if o.StartTime != nil { + st = o.StartTime.UTC() + } else { + st = time.Time{} + } + return st +} + +// --------------------------------------------------------------------------------------------------------------------- + // CopyFromURLOptions contains the optional parameters for the Client.CopyFromURL method. type CopyFromURLOptions struct { // Optional. Used to set blob tags in various blob operations. @@ -460,7 +535,7 @@ type CopyFromURLOptions struct { // is not copied from the source blob or file. Note that beginning with // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, // Blobs, and Metadata for more information. - Metadata map[string]string + Metadata map[string]*string // Specify the md5 calculated for the range of bytes that must be read from the copy source. SourceContentMD5 []byte // Optional. Indicates the tier to be set on the blob. 
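GetSASURLOptions replaces the old explicit start-time parameter. A minimal sketch of a SAS that becomes valid immediately:

// Assumed imports: "time",
//   "github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
//   "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
//   "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
func sasValidNow(c *blob.Client) (string, error) {
	opts := &blob.GetSASURLOptions{StartTime: to.Ptr(time.Now())}
	return c.GetSASURL(sas.BlobPermissions{Read: true, Write: true}, time.Now().Add(2*time.Hour), opts)
}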
@@ -469,11 +544,13 @@ type CopyFromURLOptions struct { SourceModifiedAccessConditions *SourceModifiedAccessConditions BlobAccessConditions *AccessConditions + + CPKScopeInfo *CPKScopeInfo } -func (o *CopyFromURLOptions) format() (*generated.BlobClientCopyFromURLOptions, *generated.SourceModifiedAccessConditions, *generated.ModifiedAccessConditions, *generated.LeaseAccessConditions) { +func (o *CopyFromURLOptions) format() (*generated.BlobClientCopyFromURLOptions, *generated.SourceModifiedAccessConditions, *generated.ModifiedAccessConditions, *generated.LeaseAccessConditions, *generated.CPKScopeInfo) { if o == nil { - return nil, nil, nil, nil + return nil, nil, nil, nil, nil } options := &generated.BlobClientCopyFromURLOptions{ @@ -488,5 +565,16 @@ func (o *CopyFromURLOptions) format() (*generated.BlobClientCopyFromURLOptions, } leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.BlobAccessConditions) - return options, o.SourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions + return options, o.SourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions, o.CPKScopeInfo +} + +// --------------------------------------------------------------------------------------------------------------------- + +// GetAccountInfoOptions provides set of options for Client.GetAccountInfo +type GetAccountInfoOptions struct { + // placeholder for future options +} + +func (o *GetAccountInfoOptions) format() *generated.BlobClientGetAccountInfoOptions { + return nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/responses.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/responses.go index c13d332d71974..352d975264c0c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/responses.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/responses.go @@ -13,22 +13,25 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" ) +// DownloadResponse contains the response from method BlobClient.Download. +type DownloadResponse = generated.BlobClientDownloadResponse + // DownloadStreamResponse contains the response from the DownloadStream method. // To read from the stream, read from the Body field, or call the NewRetryReader method. type DownloadStreamResponse struct { - generated.BlobClientDownloadResponse + DownloadResponse ObjectReplicationRules []ObjectReplicationPolicy client *Client getInfo httpGetterInfo - cpkInfo *CpkInfo - cpkScope *CpkScopeInfo + cpkInfo *CPKInfo + cpkScope *CPKScopeInfo } // NewRetryReader constructs new RetryReader stream for reading data. If a connection fails while // reading, it will make additional requests to reestablish a connection and continue reading. // Pass nil for options to accept the default options. -// Callers of this method should not access the DowloadStreamResponse.Body field. +// Callers of this method should not access the DownloadStreamResponse.Body field. 
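CopyFromURLOptions now threads a CPKScopeInfo through to the service call. A minimal sketch; the encryption scope name and source URL are placeholders:

// Assumed imports: "context",
//   "github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
//   "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
func copyWithEncryptionScope(ctx context.Context, c *blob.Client, srcURL string) error {
	_, err := c.CopyFromURL(ctx, srcURL, &blob.CopyFromURLOptions{
		CPKScopeInfo: &blob.CPKScopeInfo{EncryptionScope: to.Ptr("my-scope")}, // placeholder scope
	})
	return err
}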
func (r *DownloadStreamResponse) NewRetryReader(ctx context.Context, options *RetryReaderOptions) *RetryReader { if options == nil { options = &RetryReaderOptions{} @@ -41,8 +44,8 @@ func (r *DownloadStreamResponse) NewRetryReader(ctx context.Context, options *Re options := DownloadStreamOptions{ Range: getInfo.Range, AccessConditions: accessConditions, - CpkInfo: r.cpkInfo, - CpkScopeInfo: r.cpkScope, + CPKInfo: r.cpkInfo, + CPKScopeInfo: r.cpkScope, } resp, err := r.client.DownloadStream(ctx, &options) if err != nil { @@ -85,9 +88,21 @@ type SetTagsResponse = generated.BlobClientSetTagsResponse // GetTagsResponse contains the response from method BlobClient.GetTags. type GetTagsResponse = generated.BlobClientGetTagsResponse +// SetImmutabilityPolicyResponse contains the response from method BlobClient.SetImmutabilityPolicy. +type SetImmutabilityPolicyResponse = generated.BlobClientSetImmutabilityPolicyResponse + +// DeleteImmutabilityPolicyResponse contains the response from method BlobClient.DeleteImmutabilityPolicyResponse. +type DeleteImmutabilityPolicyResponse = generated.BlobClientDeleteImmutabilityPolicyResponse + +// SetLegalHoldResponse contains the response from method BlobClient.SetLegalHold. +type SetLegalHoldResponse = generated.BlobClientSetLegalHoldResponse + // CopyFromURLResponse contains the response from method BlobClient.CopyFromURL. type CopyFromURLResponse = generated.BlobClientCopyFromURLResponse +// GetAccountInfoResponse contains the response from method BlobClient.GetAccountInfo. +type GetAccountInfoResponse = generated.BlobClientGetAccountInfoResponse + // AcquireLeaseResponse contains the response from method BlobClient.AcquireLease. type AcquireLeaseResponse = generated.BlobClientAcquireLeaseResponse diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/retry_reader.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/retry_reader.go index cc1b1365d7c65..1deedb5902e13 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/retry_reader.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/retry_reader.go @@ -58,7 +58,7 @@ type RetryReaderOptions struct { injectedError error } -// RetryReader attempts to read from response, and if there is retriable network error +// RetryReader attempts to read from response, and if there is a retry-able network error // returned during reading, it will retry according to retry reader option through executing // user defined action with provided data to get a new response, and continue the overall reading process // through reading from the new response. @@ -167,7 +167,7 @@ func (s *RetryReader) Read(p []byte) (n int, err error) { // net.Conn.Close, and that is documented as "Any blocked Read or Write operations will be unblocked and return errors" // which is exactly the behaviour we want. // NOTE: that if caller has forced an early Close from a separate goroutine (separate from the Read) -// then there are two different types of error that may happen - either the one one we check for here, +// then there are two different types of error that may happen - either the one we check for here, // or a net.Error (due to closure of connection). Which one happens depends on timing. We only need this routine // to check for one, since the other is a net.Error, which our main Read retry loop is already handing. 
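NewRetryReader wraps the download body so transient network errors trigger re-issued range reads. A minimal sketch, assuming RetryReaderOptions exposes a MaxRetries field (it is outside this hunk):

// Assumed imports: "context", "io",
//   "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
func downloadWithRetries(ctx context.Context, c *blob.Client) ([]byte, error) {
	resp, err := c.DownloadStream(ctx, nil)
	if err != nil {
		return nil, err
	}
	rr := resp.NewRetryReader(ctx, &blob.RetryReaderOptions{MaxRetries: 3}) // assumed field
	defer rr.Close()
	return io.ReadAll(rr)
}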
func (s *RetryReader) wasRetryableEarlyClose(err error) bool { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/utils.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/utils.go index 7b4c8e248991b..c2d517d8ad291 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/utils.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/utils.go @@ -14,25 +14,25 @@ import ( // ObjectReplicationRules struct type ObjectReplicationRules struct { - RuleId string + RuleID string Status string } -// ObjectReplicationPolicy are deserialized attributes +// ObjectReplicationPolicy are deserialized attributes. type ObjectReplicationPolicy struct { - PolicyId *string + PolicyID *string Rules *[]ObjectReplicationRules } -// deserializeORSPolicies is utility function to deserialize ORS Policies -func deserializeORSPolicies(policies map[string]string) (objectReplicationPolicies []ObjectReplicationPolicy) { +// deserializeORSPolicies is utility function to deserialize ORS Policies. +func deserializeORSPolicies(policies map[string]*string) (objectReplicationPolicies []ObjectReplicationPolicy) { if policies == nil { return nil } // For source blobs (blobs that have policy ids and rule ids applied to them), // the header will be formatted as "x-ms-or-_: {Complete, Failed}". // The value of this header is the status of the replication. - orPolicyStatusHeader := make(map[string]string) + orPolicyStatusHeader := make(map[string]*string) for key, value := range policies { if strings.Contains(key, "or-") && key != "x-ms-or-policy-id" { orPolicyStatusHeader[key] = value @@ -44,19 +44,19 @@ func deserializeORSPolicies(policies map[string]string) (objectReplicationPolici policyAndRuleIDs := strings.Split(strings.Split(key, "or-")[1], "_") policyId, ruleId := policyAndRuleIDs[0], policyAndRuleIDs[1] - parsedResult[policyId] = append(parsedResult[policyId], ObjectReplicationRules{RuleId: ruleId, Status: value}) + parsedResult[policyId] = append(parsedResult[policyId], ObjectReplicationRules{RuleID: ruleId, Status: *value}) } for policyId, rules := range parsedResult { objectReplicationPolicies = append(objectReplicationPolicies, ObjectReplicationPolicy{ - PolicyId: &policyId, + PolicyID: &policyId, Rules: &rules, }) } return } -// ParseHTTPHeaders parses GetPropertiesResponse and returns HTTPHeaders +// ParseHTTPHeaders parses GetPropertiesResponse and returns HTTPHeaders. 
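The ORS helpers now use Go-style initialisms (PolicyID, RuleID) and pointer-valued header maps. A minimal sketch of walking the replication status attached to a download response:

// Assumed imports: "fmt",
//   "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
func printReplicationStatus(resp blob.DownloadStreamResponse) {
	for _, policy := range resp.ObjectReplicationRules {
		if policy.Rules == nil {
			continue
		}
		for _, rule := range *policy.Rules {
			fmt.Printf("policy=%s rule=%s status=%s\n", *policy.PolicyID, rule.RuleID, rule.Status)
		}
	}
}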
func ParseHTTPHeaders(resp GetPropertiesResponse) HTTPHeaders { return HTTPHeaders{ BlobContentType: resp.ContentType, diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror/error_codes.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror/error_codes.go index 6a1db8045c525..07fad60611b32 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror/error_codes.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror/error_codes.go @@ -69,6 +69,7 @@ const ( CopyIDMismatch Code = "CopyIdMismatch" EmptyMetadataKey Code = "EmptyMetadataKey" FeatureVersionMismatch Code = "FeatureVersionMismatch" + ImmutabilityPolicyDeleteOnLockedPolicy Code = "ImmutabilityPolicyDeleteOnLockedPolicy" IncrementalCopyBlobMismatch Code = "IncrementalCopyBlobMismatch" IncrementalCopyOfEralierVersionSnapshotNotAllowed Code = "IncrementalCopyOfEralierVersionSnapshotNotAllowed" IncrementalCopySourceMustBeSnapshot Code = "IncrementalCopySourceMustBeSnapshot" @@ -111,6 +112,7 @@ const ( LeaseNotPresentWithContainerOperation Code = "LeaseNotPresentWithContainerOperation" LeaseNotPresentWithLeaseOperation Code = "LeaseNotPresentWithLeaseOperation" MD5Mismatch Code = "Md5Mismatch" + CRC64Mismatch Code = "Crc64Mismatch" MaxBlobSizeConditionNotMet Code = "MaxBlobSizeConditionNotMet" MetadataTooLarge Code = "MetadataTooLarge" MissingContentLengthHeader Code = "MissingContentLengthHeader" @@ -121,6 +123,7 @@ const ( NoAuthenticationInformation Code = "NoAuthenticationInformation" NoPendingCopyOperation Code = "NoPendingCopyOperation" OperationNotAllowedOnIncrementalCopyBlob Code = "OperationNotAllowedOnIncrementalCopyBlob" + OperationNotAllowedOnRootBlob Code = "OperationNotAllowedOnRootBlob" OperationTimedOut Code = "OperationTimedOut" OutOfRangeInput Code = "OutOfRangeInput" OutOfRangeQueryParameterValue Code = "OutOfRangeQueryParameterValue" @@ -148,3 +151,9 @@ const ( UnsupportedQueryParameter Code = "UnsupportedQueryParameter" UnsupportedXMLNode Code = "UnsupportedXmlNode" ) + +var ( + // MissingSharedKeyCredential - Error is returned when SAS URL is being created without SharedKeyCredential. + MissingSharedKeyCredential = errors.New("SAS can only be signed with a SharedKeyCredential") + UnsupportedChecksum = errors.New("for multi-part uploads, user generated checksums cannot be validated") +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/chunkwriting.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/chunkwriting.go index 16927ecf895ed..24df42c75ef05 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/chunkwriting.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/chunkwriting.go @@ -12,11 +12,11 @@ import ( "encoding/base64" "encoding/binary" "errors" - "fmt" "io" "sync" "sync/atomic" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" ) @@ -25,212 +25,225 @@ import ( // This allows us to provide a local implementation that fakes the server for hermetic testing. 
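With the new sentinel errors and codes, callers can branch on service error codes via bloberror.HasCode and on the SAS sentinel via errors.Is. A minimal sketch of both checks:

// Assumed imports: "errors",
//   "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror"
func isMissingBlob(err error) bool {
	// HasCode unwraps the response error and compares the service error code
	return bloberror.HasCode(err, bloberror.BlobNotFound, bloberror.ContainerNotFound)
}

func isMissingSharedKey(err error) bool {
	return errors.Is(err, bloberror.MissingSharedKeyCredential)
}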
type blockWriter interface { StageBlock(context.Context, string, io.ReadSeekCloser, *StageBlockOptions) (StageBlockResponse, error) + Upload(context.Context, io.ReadSeekCloser, *UploadOptions) (UploadResponse, error) CommitBlockList(context.Context, []string, *CommitBlockListOptions) (CommitBlockListResponse, error) } // copyFromReader copies a source io.Reader to blob storage using concurrent uploads. -// TODO(someone): The existing model provides a buffer size and buffer limit as limiting factors. The buffer size is probably -// useless other than needing to be above some number, as the network stack is going to hack up the buffer over some size. The -// max buffers is providing a cap on how much memory we use (by multiplying it times the buffer size) and how many go routines can upload -// at a time. I think having a single max memory dial would be more efficient. We can choose an internal buffer size that works -// well, 4 MiB or 8 MiB, and auto-scale to as many goroutines within the memory limit. This gives a single dial to tweak and we can -// choose a max value for the memory setting based on internal transfers within Azure (which will give us the maximum throughput model). -// We can even provide a utility to dial this number in for customer networks to optimize their copies. -func copyFromReader(ctx context.Context, from io.Reader, to blockWriter, o UploadStreamOptions) (CommitBlockListResponse, error) { - if err := o.format(); err != nil { - return CommitBlockListResponse{}, err - } +func copyFromReader[T ~[]byte](ctx context.Context, src io.Reader, dst blockWriter, options UploadStreamOptions, getBufferManager func(maxBuffers int, bufferSize int64) shared.BufferManager[T]) (CommitBlockListResponse, error) { + options.setDefaults() + + wg := sync.WaitGroup{} // Used to know when all outgoing blocks have finished processing + errCh := make(chan error, 1) // contains the first error encountered during processing + buffers := getBufferManager(options.Concurrency, options.BlockSize) + defer buffers.Free() + + // this controls the lifetime of the uploading goroutines. + // if an error is encountered, cancel() is called which will terminate all uploads. + // NOTE: the ordering is important here. cancel MUST execute before + // cleaning up the buffers so that any uploading goroutines exit first, + // releasing their buffers back to the pool for cleanup. ctx, cancel := context.WithCancel(ctx) defer cancel() - var err error - generatedUuid, err := uuid.New() + // all blocks have IDs that start with a random UUID + blockIDPrefix, err := uuid.New() if err != nil { return CommitBlockListResponse{}, err } - - cp := &copier{ - ctx: ctx, - cancel: cancel, - reader: from, - to: to, - id: newID(generatedUuid), - o: o, - errCh: make(chan error, 1), + tracker := blockTracker{ + blockIDPrefix: blockIDPrefix, + options: options, } - // Send all our chunks until we get an error. - for { - if err = cp.sendChunk(); err != nil { + // This goroutine grabs a buffer, reads from the stream into the buffer, + // then creates a goroutine to upload/stage the block. 
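The rewritten copyFromReader below replaces the old transferManager/copier machinery with a buffer pool and one goroutine per staged block: it acquires a buffer from a shared.BufferManager, reads a chunk, stages it via blockWriter.StageBlock, and cancels the context on the first error. The sketch that follows is a simplified, standalone version of that pattern under assumed names (uploadChunk, copyInChunks are illustrative, not azblob API):

package main

import (
	"fmt"
	"io"
	"strings"
	"sync"
)

// uploadChunk stands in for staging a block; in the SDK this is StageBlock.
func uploadChunk(num int, data []byte) error {
	fmt.Printf("chunk %d: %d bytes\n", num, len(data))
	return nil
}

// copyInChunks reads src into pooled buffers and uploads each chunk on its own
// goroutine, bounded by the number of buffers in the pool.
func copyInChunks(src io.Reader, blockSize, concurrency int) error {
	pool := make(chan []byte, concurrency)
	for i := 0; i < concurrency; i++ {
		pool <- make([]byte, blockSize)
	}

	var (
		wg       sync.WaitGroup
		once     sync.Once
		firstErr error
		readErr  error
	)
	for num := 0; ; num++ {
		buf := <-pool // blocks until a buffer is free, capping concurrency
		n, err := io.ReadFull(src, buf)
		if n > 0 {
			wg.Add(1)
			// Pass num and the filled slice as arguments so the goroutine does not
			// capture loop variables by reference.
			go func(num int, chunk []byte) {
				defer wg.Done()
				if uerr := uploadChunk(num, chunk); uerr != nil {
					once.Do(func() { firstErr = uerr })
				}
				pool <- chunk[:cap(chunk)] // hand the buffer back for reuse
			}(num, buf[:n])
		} else {
			pool <- buf // nothing read; return the buffer untouched
		}
		if err != nil {
			if err != io.EOF && err != io.ErrUnexpectedEOF {
				readErr = err // a real read failure, not just end-of-stream
			}
			break
		}
	}
	wg.Wait()

	if readErr != nil {
		return readErr
	}
	return firstErr
}

func main() {
	_ = copyInChunks(strings.NewReader(strings.Repeat("loki ", 2000)), 4096, 3)
}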
+ for blockNum := uint32(0); true; blockNum++ { + var buffer T + select { + case buffer = <-buffers.Acquire(): + // got a buffer + default: + // no buffer available; allocate a new buffer if possible + if _, err := buffers.Grow(); err != nil { + return CommitBlockListResponse{}, err + } + + // either grab the newly allocated buffer or wait for one to become available + buffer = <-buffers.Acquire() + } + + var n int + n, err = shared.ReadAtLeast(src, buffer, len(buffer)) + + if n > 0 { + // some data was read, upload it + wg.Add(1) // We're posting a buffer to be sent + + // NOTE: we must pass blockNum as an arg to our goroutine else + // it's captured by reference and can change underneath us! + go func(blockNum uint32) { + // Upload the outgoing block, matching the number of bytes read + err := tracker.uploadBlock(ctx, dst, blockNum, buffer[:n]) + if err != nil { + select { + case errCh <- err: + // error was set + default: + // some other error is already set + } + cancel() + } + buffers.Release(buffer) // The goroutine reading from the stream can reuse this buffer now + + // signal that the block has been staged. + // we MUST do this after attempting to write to errCh + // to avoid it racing with the reading goroutine. + wg.Done() + }(blockNum) + } else { + // nothing was read so the buffer is empty, send it back for reuse/clean-up. + buffers.Release(buffer) + } + + if err != nil { // The reader is done, no more outgoing buffers + if errors.Is(err, io.EOF) { + // these are expected errors, we don't surface those + err = nil + } else { + // some other error happened, terminate any outstanding uploads + cancel() + } break } } - // If the error is not EOF, then we have a problem. - if err != nil && !errors.Is(err, io.EOF) { + + wg.Wait() // Wait for all outgoing blocks to complete + + if err != nil { + // there was an error reading from src, favor this error over any error during staging return CommitBlockListResponse{}, err } - // Close out our upload. - if err := cp.close(); err != nil { + select { + case err = <-errCh: + // there was an error during staging return CommitBlockListResponse{}, err + default: + // no error was encountered } - return cp.result, nil -} - -// copier streams a file via chunks in parallel from a reader representing a file. -// Do not use directly, instead use copyFromReader(). -type copier struct { - // ctx holds the context of a copier. This is normally a faux pas to store a Context in a struct. In this case, - // the copier has the lifetime of a function call, so it's fine. - ctx context.Context - cancel context.CancelFunc - - // reader is the source to be written to storage. - reader io.Reader - // to is the location we are writing our chunks to. - to blockWriter - - // o contains our options for uploading. - o UploadStreamOptions - - // id provides the ids for each chunk. - id *id - - //// num is the current chunk we are on. - //num int32 - //// ch is used to pass the next chunk of data from our reader to one of the writers. - //ch chan copierChunk - - // errCh is used to hold the first error from our concurrent writers. - errCh chan error - // wg provides a count of how many writers we are waiting to finish. - wg sync.WaitGroup - - // result holds the final result from blob storage after we have submitted all chunks. 
- result CommitBlockListResponse + // If no error, after all blocks uploaded, commit them to the blob & return the result + return tracker.commitBlocks(ctx, dst) } -// copierChunk contains buffer -type copierChunk struct { - buffer []byte - id string - length int +// used to manage the uploading and committing of blocks +type blockTracker struct { + blockIDPrefix uuid.UUID // UUID used with all blockIDs + maxBlockNum uint32 // defaults to 0 + firstBlock []byte // Used only if maxBlockNum is 0 + options UploadStreamOptions } -// getErr returns an error by priority. First, if a function set an error, it returns that error. Next, if the Context has an error -// it returns that error. Otherwise, it is nil. getErr supports only returning an error once per copier. -func (c *copier) getErr() error { - select { - case err := <-c.errCh: - return err - default: - } - return c.ctx.Err() -} +func (bt *blockTracker) uploadBlock(ctx context.Context, to blockWriter, num uint32, buffer []byte) error { + if num == 0 { + bt.firstBlock = buffer -// sendChunk reads data from out internal reader, creates a chunk, and sends it to be written via a channel. -// sendChunk returns io.EOF when the reader returns an io.EOF or io.ErrUnexpectedEOF. -func (c *copier) sendChunk() error { - if err := c.getErr(); err != nil { - return err + // If whole payload fits in 1 block, don't stage it; End will upload it with 1 I/O operation + // If the payload is exactly the same size as the buffer, there may be more content coming in. + if len(buffer) < int(bt.options.BlockSize) { + return nil + } + } else { + // Else, upload a staged block... + atomicMorphUint32(&bt.maxBlockNum, func(startVal uint32) (val uint32, morphResult uint32) { + // Atomically remember (in t.numBlocks) the maximum block num we've ever seen + if startVal < num { + return num, 0 + } + return startVal, 0 + }) } - buffer := c.o.transferManager.Get() - if len(buffer) == 0 { - return fmt.Errorf("transferManager returned a 0 size buffer, this is a bug in the manager") - } + blockID := newUUIDBlockID(bt.blockIDPrefix).WithBlockNumber(num).ToBase64() + _, err := to.StageBlock(ctx, blockID, streaming.NopCloser(bytes.NewReader(buffer)), bt.options.getStageBlockOptions()) + return err +} - n, err := io.ReadFull(c.reader, buffer) - if n > 0 { - // Some data was read, schedule the Write. - id := c.id.next() - c.wg.Add(1) - c.o.transferManager.Run( - func() { - defer c.wg.Done() - c.write(copierChunk{buffer: buffer, id: id, length: n}) - }, - ) - } else { - // Return the unused buffer to the manager. 
- c.o.transferManager.Put(buffer) - } +func (bt *blockTracker) commitBlocks(ctx context.Context, to blockWriter) (CommitBlockListResponse, error) { + // If the first block had the exact same size as the buffer + // we would have staged it as a block thinking that there might be more data coming + if bt.maxBlockNum == 0 && len(bt.firstBlock) < int(bt.options.BlockSize) { + // If whole payload fits in 1 block (block #0), upload it with 1 I/O operation + up, err := to.Upload(ctx, streaming.NopCloser(bytes.NewReader(bt.firstBlock)), bt.options.getUploadOptions()) + if err != nil { + return CommitBlockListResponse{}, err + } - if err == nil { - return nil - } else if err == io.EOF || err == io.ErrUnexpectedEOF { - return io.EOF + // convert UploadResponse to CommitBlockListResponse + return CommitBlockListResponse{ + ClientRequestID: up.ClientRequestID, + ContentMD5: up.ContentMD5, + Date: up.Date, + ETag: up.ETag, + EncryptionKeySHA256: up.EncryptionKeySHA256, + EncryptionScope: up.EncryptionScope, + IsServerEncrypted: up.IsServerEncrypted, + LastModified: up.LastModified, + RequestID: up.RequestID, + Version: up.Version, + VersionID: up.VersionID, + //ContentCRC64: up.ContentCRC64, doesn't exist on UploadResponse + }, nil } - if cerr := c.getErr(); cerr != nil { - return cerr + // Multiple blocks staged, commit them all now + blockID := newUUIDBlockID(bt.blockIDPrefix) + blockIDs := make([]string, bt.maxBlockNum+1) + for bn := uint32(0); bn < bt.maxBlockNum+1; bn++ { + blockIDs[bn] = blockID.WithBlockNumber(bn).ToBase64() } - return err + return to.CommitBlockList(ctx, blockIDs, bt.options.getCommitBlockListOptions()) } -// write uploads a chunk to blob storage. -func (c *copier) write(chunk copierChunk) { - defer c.o.transferManager.Put(chunk.buffer) +// AtomicMorpherUint32 identifies a method passed to and invoked by the AtomicMorph function. +// The AtomicMorpher callback is passed a startValue and based on this value it returns +// what the new value should be and the result that AtomicMorph should return to its caller. +type atomicMorpherUint32 func(startVal uint32) (val uint32, morphResult uint32) - if err := c.ctx.Err(); err != nil { - return - } - stageBlockOptions := c.o.getStageBlockOptions() - _, err := c.to.StageBlock(c.ctx, chunk.id, shared.NopCloser(bytes.NewReader(chunk.buffer[:chunk.length])), stageBlockOptions) - if err != nil { - select { - case c.errCh <- err: - // failed to stage block, cancel the copy - default: - // don't block the goroutine if there's a pending error +// AtomicMorph atomically morphs target in to new value (and result) as indicated bythe AtomicMorpher callback function. +func atomicMorphUint32(target *uint32, morpher atomicMorpherUint32) uint32 { + for { + currentVal := atomic.LoadUint32(target) + desiredVal, morphResult := morpher(currentVal) + if atomic.CompareAndSwapUint32(target, currentVal, desiredVal) { + return morphResult } } } -// close commits our blocks to blob storage and closes our writer. -func (c *copier) close() error { - c.wg.Wait() +type blockID [64]byte - if err := c.getErr(); err != nil { - return err - } - - var err error - commitBlockListOptions := c.o.getCommitBlockListOptions() - c.result, err = c.to.CommitBlockList(c.ctx, c.id.issued(), commitBlockListOptions) - return err +func (blockID blockID) ToBase64() string { + return base64.StdEncoding.EncodeToString(blockID[:]) } -// id allows the creation of unique IDs based on UUID4 + an int32. This auto-increments. 
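The uuidBlockID helpers above encode each block ID as a 64-byte value: a per-upload UUID in the first 16 bytes, the big-endian block number right after it, all base64-encoded. A standalone sketch of the same scheme (buildBlockID is illustrative; the SDK uses its internal uuid package rather than crypto/rand):

package main

import (
	"crypto/rand"
	"encoding/base64"
	"encoding/binary"
	"fmt"
)

// buildBlockID mirrors the uuidBlockID scheme above: a 64-byte ID whose first 16
// bytes are a per-upload UUID and whose next 4 bytes are the big-endian block number.
func buildBlockID(prefix [16]byte, blockNum uint32) string {
	var id [64]byte
	copy(id[:16], prefix[:])
	binary.BigEndian.PutUint32(id[16:], blockNum)
	return base64.StdEncoding.EncodeToString(id[:])
}

func main() {
	var prefix [16]byte
	if _, err := rand.Read(prefix[:]); err != nil {
		panic(err)
	}
	// All blocks of one upload share the prefix and differ only in the number.
	for n := uint32(0); n < 3; n++ {
		fmt.Println(buildBlockID(prefix, n))
	}
}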
-type id struct { - u [64]byte - num uint32 - all []string -} +type uuidBlockID blockID -// newID constructs a new id. -func newID(uu uuid.UUID) *id { - u := [64]byte{} - copy(u[:], uu[:]) - return &id{u: u} +func newUUIDBlockID(u uuid.UUID) uuidBlockID { + ubi := uuidBlockID{} // Create a new uuidBlockID + copy(ubi[:len(u)], u[:]) // Copy the specified UUID into it + // Block number defaults to 0 + return ubi } -// next returns the next ID. -func (id *id) next() string { - defer atomic.AddUint32(&id.num, 1) - - binary.BigEndian.PutUint32(id.u[len(uuid.UUID{}):], atomic.LoadUint32(&id.num)) - str := base64.StdEncoding.EncodeToString(id.u[:]) - id.all = append(id.all, str) - - return str +func (ubi uuidBlockID) WithBlockNumber(blockNumber uint32) uuidBlockID { + binary.BigEndian.PutUint32(ubi[len(uuid.UUID{}):], blockNumber) // Put block number after UUID + return ubi // Return the passed-in copy } -// issued returns all ids that have been issued. This returned value shares the internal slice, so it is not safe to modify the return. -// The value is only valid until the next time next() is called. -func (id *id) issued() []string { - return id.all +func (ubi uuidBlockID) ToBase64() string { + return blockID(ubi).ToBase64() } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go index 4af33356e291f..1e8da9e325469 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go @@ -11,14 +11,21 @@ import ( "context" "encoding/base64" "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" "io" + "math" "os" + "reflect" "sync" + "time" "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base" @@ -28,9 +35,7 @@ import ( ) // ClientOptions contains the optional parameters when creating a Client. -type ClientOptions struct { - azcore.ClientOptions -} +type ClientOptions base.ClientOptions // Client defines a set of operations applicable to block blobs. 
type Client base.CompositeClient[generated.BlobClient, generated.BlockBlobClient] @@ -40,12 +45,16 @@ type Client base.CompositeClient[generated.BlobClient, generated.BlockBlobClient // - cred - an Azure AD credential, typically obtained via the azidentity module // - options - client options; pass nil to accept the default values func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { - authPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil) + audience := base.GetAudience((*base.ClientOptions)(options)) + authPolicy := shared.NewStorageChallengePolicy(cred, audience) conOptions := shared.GetClientOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - return (*Client)(base.NewBlockBlobClient(blobURL, pl, nil)), nil + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + return (*Client)(base.NewBlockBlobClient(blobURL, azClient, nil)), nil } // NewClientWithNoCredential creates an instance of Client with the specified values. @@ -54,9 +63,13 @@ func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptio // - options - client options; pass nil to accept the default values func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) { conOptions := shared.GetClientOptions(options) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) - return (*Client)(base.NewBlockBlobClient(blobURL, pl, nil)), nil + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + + return (*Client)(base.NewBlockBlobClient(blobURL, azClient, nil)), nil } // NewClientWithSharedKeyCredential creates an instance of Client with the specified values. @@ -66,10 +79,14 @@ func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, func NewClientWithSharedKeyCredential(blobURL string, cred *blob.SharedKeyCredential, options *ClientOptions) (*Client, error) { authPolicy := exported.NewSharedKeyCredPolicy(cred) conOptions := shared.GetClientOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - return (*Client)(base.NewBlockBlobClient(blobURL, pl, cred)), nil + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + + return (*Client)(base.NewBlockBlobClient(blobURL, azClient, cred)), nil } // NewClientFromConnectionString creates an instance of Client with the specified values. @@ -104,12 +121,17 @@ func (bb *Client) generated() *generated.BlockBlobClient { return blockBlob } +func (bb *Client) innerBlobGenerated() *generated.BlobClient { + b := bb.BlobClient() + return base.InnerClient((*base.Client[generated.BlobClient])(b)) +} + // URL returns the URL endpoint used by the Client object. 
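The constructors above now build an azcore client with the auth policy attached per retry instead of assembling a pipeline directly. A minimal usage sketch; the blob URL is a placeholder and the azidentity module is an assumption not shown in this patch:

package main

import (
	"context"
	"log"
	"strings"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
)

func main() {
	// Placeholder; point this at a real account/container/blob before running.
	const blobURL = "https://<account>.blob.core.windows.net/<container>/<blob>"

	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}

	client, err := blockblob.NewClient(blobURL, cred, nil) // nil options accept the defaults
	if err != nil {
		log.Fatal(err)
	}

	// Upload a small payload in a single request.
	body := streaming.NopCloser(strings.NewReader("hello"))
	if _, err := client.Upload(context.Background(), body, nil); err != nil {
		log.Fatal(err)
	}
}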
func (bb *Client) URL() string { return bb.generated().Endpoint() } -// BlobClient returns the embedded blob client for this AppendBlob client. +// BlobClient returns the embedded blob client for this BlockBlob client. func (bb *Client) BlobClient() *blob.Client { blobClient, _ := base.InnerClients((*base.CompositeClient[generated.BlobClient, generated.BlockBlobClient])(bb)) return (*blob.Client)(blobClient) @@ -124,7 +146,7 @@ func (bb *Client) WithSnapshot(snapshot string) (*Client, error) { } p.Snapshot = snapshot - return (*Client)(base.NewBlockBlobClient(p.String(), bb.generated().Pipeline(), bb.sharedKey())), nil + return (*Client)(base.NewBlockBlobClient(p.String(), bb.generated().Internal(), bb.sharedKey())), nil } // WithVersionID creates a new AppendBlobURL object identical to the source but with the specified version id. @@ -136,7 +158,7 @@ func (bb *Client) WithVersionID(versionID string) (*Client, error) { } p.VersionID = versionID - return (*Client)(base.NewBlockBlobClient(p.String(), bb.generated().Pipeline(), bb.sharedKey())), nil + return (*Client)(base.NewBlockBlobClient(p.String(), bb.generated().Internal(), bb.sharedKey())), nil } // Upload creates a new block blob or overwrites an existing block blob. @@ -154,10 +176,30 @@ func (bb *Client) Upload(ctx context.Context, body io.ReadSeekCloser, options *U opts, httpHeaders, leaseInfo, cpkV, cpkN, accessConditions := options.format() + if options != nil && options.TransactionalValidation != nil { + body, err = options.TransactionalValidation.Apply(body, opts) + if err != nil { + return UploadResponse{}, err + } + } + resp, err := bb.generated().Upload(ctx, count, body, opts, httpHeaders, leaseInfo, cpkV, cpkN, accessConditions) return resp, err } +// UploadBlobFromURL - The Put Blob from URL operation creates a new Block Blob where the contents of the blob are read from +// a given URL. Partial updates are not supported with Put Blob from URL; the content of an existing blob is overwritten +// with the content of the new blob. To perform partial updates to a block blob’s contents using a source URL, use the Put +// Block from URL API in conjunction with Put Block List. +// For more information, see https://learn.microsoft.com/rest/api/storageservices/put-blob-from-url +func (bb *Client) UploadBlobFromURL(ctx context.Context, copySource string, options *UploadBlobFromURLOptions) (UploadBlobFromURLResponse, error) { + opts, httpHeaders, leaseAccessConditions, cpkInfo, cpkSourceInfo, modifiedAccessConditions, sourceModifiedConditions := options.format() + + resp, err := bb.generated().PutBlobFromURL(ctx, int64(0), copySource, opts, httpHeaders, leaseAccessConditions, cpkInfo, cpkSourceInfo, modifiedAccessConditions, sourceModifiedConditions) + + return resp, err +} + // StageBlock uploads the specified block to the block blob's "staging area" to be later committed by a call to CommitBlockList. // Note that the http client closes the body stream after the request is sent to the service. // For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block. @@ -169,6 +211,13 @@ func (bb *Client) StageBlock(ctx context.Context, base64BlockID string, body io. 
opts, leaseAccessConditions, cpkInfo, cpkScopeInfo := options.format() + if options != nil && options.TransactionalValidation != nil { + body, err = options.TransactionalValidation.Apply(body, opts) + if err != nil { + return StageBlockResponse{}, nil + } + } + resp, err := bb.generated().StageBlock(ctx, base64BlockID, count, body, opts, leaseAccessConditions, cpkInfo, cpkScopeInfo) return resp, err } @@ -176,12 +225,11 @@ func (bb *Client) StageBlock(ctx context.Context, base64BlockID string, body io. // StageBlockFromURL copies the specified block from a source URL to the block blob's "staging area" to be later committed by a call to CommitBlockList. // If count is CountToEnd (0), then data is read from specified offset to the end. // For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/put-block-from-url. -func (bb *Client) StageBlockFromURL(ctx context.Context, base64BlockID string, sourceURL string, - contentLength int64, options *StageBlockFromURLOptions) (StageBlockFromURLResponse, error) { +func (bb *Client) StageBlockFromURL(ctx context.Context, base64BlockID string, sourceURL string, options *StageBlockFromURLOptions) (StageBlockFromURLResponse, error) { stageBlockFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, sourceModifiedAccessConditions := options.format() - resp, err := bb.generated().StageBlockFromURL(ctx, base64BlockID, contentLength, sourceURL, stageBlockFromURLOptions, + resp, err := bb.generated().StageBlockFromURL(ctx, base64BlockID, 0, sourceURL, stageBlockFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, sourceModifiedAccessConditions) return resp, err @@ -205,8 +253,8 @@ func (bb *Client) CommitBlockList(ctx context.Context, base64BlockIDs []string, var commitOptions *generated.BlockBlobClientCommitBlockListOptions var headers *generated.BlobHTTPHeaders var leaseAccess *blob.LeaseAccessConditions - var cpkInfo *generated.CpkInfo - var cpkScope *generated.CpkScopeInfo + var cpkInfo *generated.CPKInfo + var cpkScope *generated.CPKScopeInfo var modifiedAccess *generated.ModifiedAccessConditions if options != nil { @@ -218,12 +266,20 @@ func (bb *Client) CommitBlockList(ctx context.Context, base64BlockIDs []string, Timeout: options.Timeout, TransactionalContentCRC64: options.TransactionalContentCRC64, TransactionalContentMD5: options.TransactionalContentMD5, + LegalHold: options.LegalHold, + ImmutabilityPolicyMode: options.ImmutabilityPolicyMode, + ImmutabilityPolicyExpiry: options.ImmutabilityPolicyExpiryTime, + } + + // If user attempts to pass in their own checksum, errors out. + if options.TransactionalContentMD5 != nil || options.TransactionalContentCRC64 != nil { + return CommitBlockListResponse{}, bloberror.UnsupportedChecksum } headers = options.HTTPHeaders leaseAccess, modifiedAccess = exported.FormatBlobAccessConditions(options.AccessConditions) - cpkInfo = options.CpkInfo - cpkScope = options.CpkScopeInfo + cpkInfo = options.CPKInfo + cpkScope = options.CPKScopeInfo } resp, err := bb.generated().CommitBlockList(ctx, blockLookupList, commitOptions, headers, leaseAccess, cpkInfo, cpkScope, modifiedAccess) @@ -255,9 +311,27 @@ func (bb *Client) Undelete(ctx context.Context, o *blob.UndeleteOptions) (blob.U return bb.BlobClient().Undelete(ctx, o) } +// SetImmutabilityPolicy operation enables users to set the immutability policy on a blob. 
+// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview +func (bb *Client) SetImmutabilityPolicy(ctx context.Context, expiryTime time.Time, options *blob.SetImmutabilityPolicyOptions) (blob.SetImmutabilityPolicyResponse, error) { + return bb.BlobClient().SetImmutabilityPolicy(ctx, expiryTime, options) +} + +// DeleteImmutabilityPolicy operation enables users to delete the immutability policy on a blob. +// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview +func (bb *Client) DeleteImmutabilityPolicy(ctx context.Context, options *blob.DeleteImmutabilityPolicyOptions) (blob.DeleteImmutabilityPolicyResponse, error) { + return bb.BlobClient().DeleteImmutabilityPolicy(ctx, options) +} + +// SetLegalHold operation enables users to set legal hold on a blob. +// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview +func (bb *Client) SetLegalHold(ctx context.Context, legalHold bool, options *blob.SetLegalHoldOptions) (blob.SetLegalHoldResponse, error) { + return bb.BlobClient().SetLegalHold(ctx, legalHold, options) +} + // SetTier operation sets the tier on a blob. The operation is allowed on a page // blob in a premium storage account and on a block blob in a blob storage account (locally -// redundant storage only). A premium page blob's tier determines the allowed size, IOPS, and +// redundant storage only). A premium page blob's tier determines the allowed size, IOPs, and // bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation // does not update the blob's ETag. // For detailed information about block blob level tiering see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers. @@ -265,12 +339,29 @@ func (bb *Client) SetTier(ctx context.Context, tier blob.AccessTier, o *blob.Set return bb.BlobClient().SetTier(ctx, tier, o) } +// SetExpiry operation sets an expiry time on an existing blob. This operation is only allowed on Hierarchical Namespace enabled accounts. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/set-blob-expiry +func (bb *Client) SetExpiry(ctx context.Context, expiryType ExpiryType, o *SetExpiryOptions) (SetExpiryResponse, error) { + if expiryType == nil { + expiryType = ExpiryTypeNever{} + } + et, opts := expiryType.Format(o) + resp, err := bb.innerBlobGenerated().SetExpiry(ctx, et, opts) + return resp, err +} + // GetProperties returns the blob's properties. // For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob-properties. func (bb *Client) GetProperties(ctx context.Context, o *blob.GetPropertiesOptions) (blob.GetPropertiesResponse, error) { return bb.BlobClient().GetProperties(ctx, o) } +// GetAccountInfo provides account level information +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/get-account-information?tabs=shared-access-signatures. +func (bb *Client) GetAccountInfo(ctx context.Context, o *blob.GetAccountInfoOptions) (blob.GetAccountInfoResponse, error) { + return bb.BlobClient().GetAccountInfo(ctx, o) +} + // SetHTTPHeaders changes a blob's HTTP headers. // For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. 
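The methods added just above (SetImmutabilityPolicy, DeleteImmutabilityPolicy, SetLegalHold, and later SetExpiry) all delegate to the embedded blob client. A hedged sketch of driving them; applyRetention is an illustrative helper, not SDK API, and it assumes the target account and container support immutable storage:

package main

import (
	"context"
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
)

// applyRetention is an illustrative helper, not part of the SDK; it assumes the
// target account and container are configured for immutable storage.
func applyRetention(ctx context.Context, bb *blockblob.Client) error {
	// Time-based policy: block modification and deletion for 24 hours.
	if _, err := bb.SetImmutabilityPolicy(ctx, time.Now().Add(24*time.Hour), nil); err != nil {
		return err
	}
	// Legal hold on top of the time-based policy.
	if _, err := bb.SetLegalHold(ctx, true, nil); err != nil {
		return err
	}
	// SetExpiry is only permitted on hierarchical-namespace (ADLS Gen2) accounts.
	_, err := bb.SetExpiry(ctx, blockblob.ExpiryTypeRelativeToNow(48*time.Hour), nil)
	return err
}

func main() {}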
func (bb *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders blob.HTTPHeaders, o *blob.SetHTTPHeadersOptions) (blob.SetHTTPHeadersResponse, error) { @@ -279,7 +370,7 @@ func (bb *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders blob.HTTPHeade // SetMetadata changes a blob's metadata. // https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata. -func (bb *Client) SetMetadata(ctx context.Context, metadata map[string]string, o *blob.SetMetadataOptions) (blob.SetMetadataResponse, error) { +func (bb *Client) SetMetadata(ctx context.Context, metadata map[string]*string, o *blob.SetMetadataOptions) (blob.SetMetadataResponse, error) { return bb.BlobClient().SetMetadata(ctx, metadata, o) } @@ -321,34 +412,36 @@ func (bb *Client) CopyFromURL(ctx context.Context, copySource string, o *blob.Co return bb.BlobClient().CopyFromURL(ctx, copySource, o) } +// GetSASURL is a convenience method for generating a SAS token for the currently pointed at block blob. +// It can only be used if the credential supplied during creation was a SharedKeyCredential. +func (bb *Client) GetSASURL(permissions sas.BlobPermissions, expiry time.Time, o *blob.GetSASURLOptions) (string, error) { + return bb.BlobClient().GetSASURL(permissions, expiry, o) +} + // Concurrent Upload Functions ----------------------------------------------------------------------------------------- // uploadFromReader uploads a buffer in blocks to a block blob. -func (bb *Client) uploadFromReader(ctx context.Context, reader io.ReaderAt, readerSize int64, o *uploadFromReaderOptions) (uploadFromReaderResponse, error) { +func (bb *Client) uploadFromReader(ctx context.Context, reader io.ReaderAt, actualSize int64, o *uploadFromReaderOptions) (uploadFromReaderResponse, error) { if o.BlockSize == 0 { // If bufferSize > (MaxStageBlockBytes * MaxBlocks), then error - if readerSize > MaxStageBlockBytes*MaxBlocks { + if actualSize > MaxStageBlockBytes*MaxBlocks { return uploadFromReaderResponse{}, errors.New("buffer is too large to upload to a block blob") } // If bufferSize <= MaxUploadBlobBytes, then Upload should be used with just 1 I/O request - if readerSize <= MaxUploadBlobBytes { + if actualSize <= MaxUploadBlobBytes { o.BlockSize = MaxUploadBlobBytes // Default if unspecified } else { - if remainder := readerSize % MaxBlocks; remainder > 0 { - // ensure readerSize is a multiple of MaxBlocks - readerSize += (MaxBlocks - remainder) - } - o.BlockSize = readerSize / MaxBlocks // buffer / max blocks = block size to use all 50,000 blocks - if o.BlockSize < blob.DefaultDownloadBlockSize { // If the block size is smaller than 4MB, round up to 4MB + o.BlockSize = int64(math.Ceil(float64(actualSize) / MaxBlocks)) // ceil(buffer / max blocks) = block size to use all 50,000 blocks + if o.BlockSize < blob.DefaultDownloadBlockSize { // If the block size is smaller than 4MB, round up to 4MB o.BlockSize = blob.DefaultDownloadBlockSize } // StageBlock will be called with blockSize blocks and a Concurrency of (BufferSize / BlockSize). 
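The sizing branch above only runs when the caller leaves BlockSize at zero: payloads up to MaxUploadBlobBytes go through a single Upload call, larger ones get ceil(size / MaxBlocks) rounded up to the 4 MiB minimum. A standalone sketch with the limits written out as local constants (assumed to match the SDK's documented values of 256 MiB, 50,000 blocks, and 4 MiB):

package main

import (
	"fmt"
	"math"
)

// pickBlockSize mirrors the sizing branch above, with the limits written out as
// local constants assumed to match the SDK's documented values.
func pickBlockSize(actualSize int64) int64 {
	const (
		maxUploadBlobBytes = 256 * 1024 * 1024 // single-put ceiling
		maxBlocks          = 50000             // maximum committed blocks per blob
		minBlockSize       = 4 * 1024 * 1024   // round small results up to 4 MiB
	)
	if actualSize <= maxUploadBlobBytes {
		return maxUploadBlobBytes // one Upload call, no block staging needed
	}
	blockSize := int64(math.Ceil(float64(actualSize) / maxBlocks))
	if blockSize < minBlockSize {
		blockSize = minBlockSize
	}
	return blockSize
}

func main() {
	for _, size := range []int64{8 << 20, 1 << 30, 500 << 30} {
		fmt.Printf("%d bytes -> block size %d\n", size, pickBlockSize(size))
	}
}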
} } - if readerSize <= MaxUploadBlobBytes { + if actualSize <= MaxUploadBlobBytes { // If the size can fit in 1 Upload call, do it this way - var body io.ReadSeeker = io.NewSectionReader(reader, 0, readerSize) + var body io.ReadSeeker = io.NewSectionReader(reader, 0, actualSize) if o.Progress != nil { body = streaming.NewRequestProgress(shared.NopCloser(body), o.Progress) } @@ -359,26 +452,41 @@ func (bb *Client) uploadFromReader(ctx context.Context, reader io.ReaderAt, read return toUploadReaderAtResponseFromUploadResponse(resp), err } - var numBlocks = uint16(((readerSize - 1) / o.BlockSize) + 1) + var numBlocks = uint16(((actualSize - 1) / o.BlockSize) + 1) if numBlocks > MaxBlocks { // prevent any math bugs from attempting to upload too many blocks which will always fail return uploadFromReaderResponse{}, errors.New("block limit exceeded") } + if log.Should(exported.EventUpload) { + urlparts, err := blob.ParseURL(bb.generated().Endpoint()) + if err == nil { + log.Writef(exported.EventUpload, "blob name %s actual size %v block-size %v block-count %v", + urlparts.BlobName, actualSize, o.BlockSize, numBlocks) + } + } + blockIDList := make([]string, numBlocks) // Base-64 encoded block IDs progress := int64(0) progressLock := &sync.Mutex{} err := shared.DoBatchTransfer(ctx, &shared.BatchTransferOptions{ OperationName: "uploadFromReader", - TransferSize: readerSize, + TransferSize: actualSize, ChunkSize: o.BlockSize, + NumChunks: uint16(((actualSize - 1) / o.BlockSize) + 1), Concurrency: o.Concurrency, - Operation: func(offset int64, count int64, ctx context.Context) error { + Operation: func(ctx context.Context, offset int64, chunkSize int64) error { // This function is called once per block. // It is passed this block's offset within the buffer and its count of bytes // Prepare to read the proper block/section of the buffer - var body io.ReadSeeker = io.NewSectionReader(reader, offset, count) + if chunkSize < o.BlockSize { + // this is the last block. its actual size might be less + // than the calculated size due to rounding up of the payload + // size to fit in a whole number of blocks. + chunkSize = (actualSize - offset) + } + var body io.ReadSeeker = io.NewSectionReader(reader, offset, chunkSize) blockNum := offset / o.BlockSize if o.Progress != nil { blockProgress := int64(0) @@ -421,6 +529,12 @@ func (bb *Client) UploadBuffer(ctx context.Context, buffer []byte, o *UploadBuff if o != nil { uploadOptions = *o } + + // If user attempts to pass in their own checksum, errors out. + if uploadOptions.TransactionalValidation != nil && reflect.TypeOf(uploadOptions.TransactionalValidation).Kind() != reflect.Func { + return UploadBufferResponse{}, bloberror.UnsupportedChecksum + } + return bb.uploadFromReader(ctx, bytes.NewReader(buffer), int64(len(buffer)), &uploadOptions) } @@ -434,26 +548,28 @@ func (bb *Client) UploadFile(ctx context.Context, file *os.File, o *UploadFileOp if o != nil { uploadOptions = *o } + + // If user attempts to pass in their own checksum, errors out. + if uploadOptions.TransactionalValidation != nil && reflect.TypeOf(uploadOptions.TransactionalValidation).Kind() != reflect.Func { + return UploadFileResponse{}, bloberror.UnsupportedChecksum + } + return bb.uploadFromReader(ctx, file, stat.Size(), &uploadOptions) } // UploadStream copies the file held in io.Reader to the Blob at blockBlobClient. // A Context deadline or cancellation will cause this to error. 
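UploadStream, shown next, now drives the generic copyFromReader with the shared.NewMMBPool buffer manager instead of a TransferManager, and setDefaults bumps a zero Concurrency to one and any BlockSize under 1 MiB up to 1 MiB. A usage sketch, assuming a client constructed as in the earlier example:

package main

import (
	"context"
	"os"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
)

// streamFile uploads a file of unknown size; UploadStream reads it in BlockSize
// chunks and stages up to Concurrency blocks in parallel before committing them.
func streamFile(ctx context.Context, client *blockblob.Client, path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()

	_, err = client.UploadStream(ctx, f, &blockblob.UploadStreamOptions{
		BlockSize:   8 * 1024 * 1024, // values under 1 MiB are raised to the 1 MiB minimum
		Concurrency: 4,               // zero falls back to the default of one
	})
	return err
}

func main() {}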
func (bb *Client) UploadStream(ctx context.Context, body io.Reader, o *UploadStreamOptions) (UploadStreamResponse, error) { - if err := o.format(); err != nil { - return CommitBlockListResponse{}, err - } - if o == nil { o = &UploadStreamOptions{} } - // If we used the default manager, we need to close it. - if o.transferMangerNotSet { - defer o.transferManager.Close() + // If user attempts to pass in their own checksum, errors out. + if o.TransactionalValidation != nil && reflect.TypeOf(o.TransactionalValidation).Kind() != reflect.Func { + return UploadStreamResponse{}, bloberror.UnsupportedChecksum } - result, err := copyFromReader(ctx, body, bb, *o) + result, err := copyFromReader(ctx, body, bb, *o, shared.NewMMBPool) if err != nil { return CommitBlockListResponse{}, err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/constants.go index 46de1ede7e16d..ce3a5d8de3f2d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/constants.go @@ -8,9 +8,8 @@ package blockblob import "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" -// nolint const ( - // CountToEnd specifies the end of the file + // CountToEnd specifies the end of the file. CountToEnd = 0 _1MiB = 1024 * 1024 @@ -38,3 +37,16 @@ const ( func PossibleBlockListTypeValues() []BlockListType { return generated.PossibleBlockListTypeValues() } + +// BlobCopySourceTags - can be 'COPY' or 'REPLACE' +type BlobCopySourceTags = generated.BlobCopySourceTags + +const ( + BlobCopySourceTagsCopy = generated.BlobCopySourceTagsCOPY + BlobCopySourceTagsReplace = generated.BlobCopySourceTagsREPLACE +) + +// PossibleBlobCopySourceTagsValues returns the possible values for the BlobCopySourceTags const type. +func PossibleBlobCopySourceTagsValues() []BlobCopySourceTags { + return generated.PossibleBlobCopySourceTagsValues() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/models.go index 1e93143642d89..453d569e5d2d5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/models.go @@ -7,7 +7,7 @@ package blockblob import ( - "fmt" + "time" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" @@ -20,7 +20,7 @@ import ( // Block - Represents a single block in a block blob. It describes the block's ID and size. type Block = generated.Block -// BlockList - type of blocklist (committed/uncommitted) +// BlockList - can be uncommitted or committed blocks (committed/uncommitted) type BlockList = generated.BlockList // Request Model Declaration ------------------------------------------------------------------------------------------- @@ -31,64 +31,123 @@ type UploadOptions struct { Tags map[string]string // Optional. Specifies a user-defined name-value pair associated with the blob. - Metadata map[string]string + Metadata map[string]*string // Optional. Indicates the tier to be set on the blob. Tier *blob.AccessTier - // Specify the transactional md5 for the body, to be validated by the service. 
- TransactionalContentMD5 []byte + // TransactionalValidation specifies the transfer validation type to use. + // The default is nil (no transfer validation). + TransactionalValidation blob.TransferValidationType - HTTPHeaders *blob.HTTPHeaders - CpkInfo *blob.CpkInfo - CpkScopeInfo *blob.CpkScopeInfo - AccessConditions *blob.AccessConditions + HTTPHeaders *blob.HTTPHeaders + CPKInfo *blob.CPKInfo + CPKScopeInfo *blob.CPKScopeInfo + AccessConditions *blob.AccessConditions + LegalHold *bool + ImmutabilityPolicyMode *blob.ImmutabilityPolicySetting + ImmutabilityPolicyExpiryTime *time.Time + + // Deprecated: TransactionalContentMD5 can be set by using TransactionalValidation instead + TransactionalContentMD5 []byte } func (o *UploadOptions) format() (*generated.BlockBlobClientUploadOptions, *generated.BlobHTTPHeaders, *generated.LeaseAccessConditions, - *generated.CpkInfo, *generated.CpkScopeInfo, *generated.ModifiedAccessConditions) { + *generated.CPKInfo, *generated.CPKScopeInfo, *generated.ModifiedAccessConditions) { if o == nil { return nil, nil, nil, nil, nil, nil } basics := generated.BlockBlobClientUploadOptions{ - BlobTagsString: shared.SerializeBlobTagsToStrPtr(o.Tags), - Metadata: o.Metadata, - Tier: o.Tier, - TransactionalContentMD5: o.TransactionalContentMD5, + BlobTagsString: shared.SerializeBlobTagsToStrPtr(o.Tags), + Metadata: o.Metadata, + Tier: o.Tier, + TransactionalContentMD5: o.TransactionalContentMD5, + LegalHold: o.LegalHold, + ImmutabilityPolicyMode: o.ImmutabilityPolicyMode, + ImmutabilityPolicyExpiry: o.ImmutabilityPolicyExpiryTime, } leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) - return &basics, o.HTTPHeaders, leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions + return &basics, o.HTTPHeaders, leaseAccessConditions, o.CPKInfo, o.CPKScopeInfo, modifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// UploadBlobFromURLOptions contains the optional parameters for the Client.UploadBlobFromURL method. +type UploadBlobFromURLOptions struct { + // Optional. Used to set blob tags in various blob operations. + Tags map[string]string + + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + + // Optional, default is true. Indicates if properties from the source blob should be copied. + CopySourceBlobProperties *bool + + // Optional, default 'replace'. Indicates if source tags should be copied or replaced with the tags specified by x-ms-tags. + CopySourceTags *BlobCopySourceTags + + // Optional. Specifies a user-defined name-value pair associated with the blob. + Metadata map[string]*string + + // Optional. Specifies the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + + // Optional. Indicates the tier to be set on the blob. 
+ Tier *blob.AccessTier + + // Additional optional headers + HTTPHeaders *blob.HTTPHeaders + AccessConditions *blob.AccessConditions + CPKInfo *blob.CPKInfo + CPKScopeInfo *blob.CPKScopeInfo + SourceModifiedAccessConditions *blob.SourceModifiedAccessConditions +} + +func (o *UploadBlobFromURLOptions) format() (*generated.BlockBlobClientPutBlobFromURLOptions, *generated.BlobHTTPHeaders, + *generated.LeaseAccessConditions, *generated.CPKInfo, *generated.CPKScopeInfo, *generated.ModifiedAccessConditions, + *generated.SourceModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil, nil, nil, nil + } + + options := generated.BlockBlobClientPutBlobFromURLOptions{ + BlobTagsString: shared.SerializeBlobTagsToStrPtr(o.Tags), + CopySourceAuthorization: o.CopySourceAuthorization, + CopySourceBlobProperties: o.CopySourceBlobProperties, + CopySourceTags: o.CopySourceTags, + Metadata: o.Metadata, + SourceContentMD5: o.SourceContentMD5, + Tier: o.Tier, + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return &options, o.HTTPHeaders, leaseAccessConditions, o.CPKInfo, o.CPKScopeInfo, modifiedAccessConditions, o.SourceModifiedAccessConditions } // --------------------------------------------------------------------------------------------------------------------- // StageBlockOptions contains the optional parameters for the Client.StageBlock method. type StageBlockOptions struct { - CpkInfo *blob.CpkInfo + CPKInfo *blob.CPKInfo - CpkScopeInfo *blob.CpkScopeInfo + CPKScopeInfo *blob.CPKScopeInfo LeaseAccessConditions *blob.LeaseAccessConditions - // Specify the transactional crc64 for the body, to be validated by the service. - TransactionalContentCRC64 []byte - - // Specify the transactional md5 for the body, to be validated by the service. - TransactionalContentMD5 []byte + // TransactionalValidation specifies the transfer validation type to use. + // The default is nil (no transfer validation). + TransactionalValidation blob.TransferValidationType } // StageBlockOptions contains the optional parameters for the Client.StageBlock method. -func (o *StageBlockOptions) format() (*generated.BlockBlobClientStageBlockOptions, *generated.LeaseAccessConditions, *generated.CpkInfo, *generated.CpkScopeInfo) { +func (o *StageBlockOptions) format() (*generated.BlockBlobClientStageBlockOptions, *generated.LeaseAccessConditions, *generated.CPKInfo, *generated.CPKScopeInfo) { if o == nil { return nil, nil, nil, nil } - return &generated.BlockBlobClientStageBlockOptions{ - TransactionalContentCRC64: o.TransactionalContentCRC64, - TransactionalContentMD5: o.TransactionalContentMD5, - }, o.LeaseAccessConditions, o.CpkInfo, o.CpkScopeInfo + return &generated.BlockBlobClientStageBlockOptions{}, o.LeaseAccessConditions, o.CPKInfo, o.CPKScopeInfo } // --------------------------------------------------------------------------------------------------------------------- @@ -101,49 +160,57 @@ type StageBlockFromURLOptions struct { LeaseAccessConditions *blob.LeaseAccessConditions SourceModifiedAccessConditions *blob.SourceModifiedAccessConditions - // Specify the md5 calculated for the range of bytes that must be read from the copy source. - SourceContentMD5 []byte - // Specify the crc64 calculated for the range of bytes that must be read from the copy source. - SourceContentCRC64 []byte + + // SourceContentValidation contains the validation mechanism used on the range of bytes read from the source. 
+ SourceContentValidation blob.SourceContentValidationType // Range specifies a range of bytes. The default value is all bytes. Range blob.HTTPRange - CpkInfo *blob.CpkInfo + CPKInfo *blob.CPKInfo - CpkScopeInfo *blob.CpkScopeInfo + CPKScopeInfo *blob.CPKScopeInfo } -func (o *StageBlockFromURLOptions) format() (*generated.BlockBlobClientStageBlockFromURLOptions, *generated.CpkInfo, *generated.CpkScopeInfo, *generated.LeaseAccessConditions, *generated.SourceModifiedAccessConditions) { +func (o *StageBlockFromURLOptions) format() (*generated.BlockBlobClientStageBlockFromURLOptions, *generated.CPKInfo, *generated.CPKScopeInfo, *generated.LeaseAccessConditions, *generated.SourceModifiedAccessConditions) { if o == nil { return nil, nil, nil, nil, nil } options := &generated.BlockBlobClientStageBlockFromURLOptions{ CopySourceAuthorization: o.CopySourceAuthorization, - SourceContentMD5: o.SourceContentMD5, - SourceContentcrc64: o.SourceContentCRC64, SourceRange: exported.FormatHTTPRange(o.Range), } - return options, o.CpkInfo, o.CpkScopeInfo, o.LeaseAccessConditions, o.SourceModifiedAccessConditions + if o.SourceContentValidation != nil { + o.SourceContentValidation.Apply(options) + } + + return options, o.CPKInfo, o.CPKScopeInfo, o.LeaseAccessConditions, o.SourceModifiedAccessConditions } // --------------------------------------------------------------------------------------------------------------------- // CommitBlockListOptions contains the optional parameters for Client.CommitBlockList method. type CommitBlockListOptions struct { - Tags map[string]string - Metadata map[string]string - RequestID *string - Tier *blob.AccessTier - Timeout *int32 + Tags map[string]string + Metadata map[string]*string + RequestID *string + Tier *blob.AccessTier + Timeout *int32 + HTTPHeaders *blob.HTTPHeaders + CPKInfo *blob.CPKInfo + CPKScopeInfo *blob.CPKScopeInfo + AccessConditions *blob.AccessConditions + LegalHold *bool + ImmutabilityPolicyMode *blob.ImmutabilityPolicySetting + ImmutabilityPolicyExpiryTime *time.Time + + // Deprecated: TransactionalContentCRC64 cannot be generated TransactionalContentCRC64 []byte - TransactionalContentMD5 []byte - HTTPHeaders *blob.HTTPHeaders - CpkInfo *blob.CpkInfo - CpkScopeInfo *blob.CpkScopeInfo - AccessConditions *blob.AccessConditions + + // Deprecated: TransactionalContentMD5 cannot be generated + TransactionalContentMD5 []byte } // --------------------------------------------------------------------------------------------------------------------- @@ -178,7 +245,7 @@ type uploadFromReaderOptions struct { HTTPHeaders *blob.HTTPHeaders // Metadata indicates the metadata to be associated with the blob when PutBlockList is called. - Metadata map[string]string + Metadata map[string]*string // AccessConditions indicates the access conditions for the block blob. AccessConditions *blob.AccessConditions @@ -190,30 +257,35 @@ type uploadFromReaderOptions struct { Tags map[string]string // ClientProvidedKeyOptions indicates the client provided key by name and/or by value to encrypt/decrypt data. - CpkInfo *blob.CpkInfo - CpkScopeInfo *blob.CpkScopeInfo + CPKInfo *blob.CPKInfo + CPKScopeInfo *blob.CPKScopeInfo // Concurrency indicates the maximum number of blocks to upload in parallel (0=default) Concurrency uint16 - // Optional header, Specifies the transactional crc64 for the body, to be validated by the service. - TransactionalContentCRC64 *[]byte - // Specify the transactional md5 for the body, to be validated by the service. 
- TransactionalContentMD5 *[]byte + TransactionalValidation blob.TransferValidationType + + // Deprecated: TransactionalContentCRC64 cannot be generated at block level + TransactionalContentCRC64 uint64 + + // Deprecated: TransactionalContentMD5 cannot be generated at block level + TransactionalContentMD5 []byte } -// UploadBufferOptions provides set of configurations for UploadBuffer operation +// UploadBufferOptions provides set of configurations for UploadBuffer operation. type UploadBufferOptions = uploadFromReaderOptions -// UploadFileOptions provides set of configurations for UploadFile operation +// UploadFileOptions provides set of configurations for UploadFile operation. type UploadFileOptions = uploadFromReaderOptions func (o *uploadFromReaderOptions) getStageBlockOptions() *StageBlockOptions { leaseAccessConditions, _ := exported.FormatBlobAccessConditions(o.AccessConditions) return &StageBlockOptions{ - CpkInfo: o.CpkInfo, - CpkScopeInfo: o.CpkScopeInfo, + CPKInfo: o.CPKInfo, + CPKScopeInfo: o.CPKScopeInfo, LeaseAccessConditions: leaseAccessConditions, + + TransactionalValidation: o.TransactionalValidation, } } @@ -224,8 +296,8 @@ func (o *uploadFromReaderOptions) getUploadBlockBlobOptions() *UploadOptions { Tier: o.AccessTier, HTTPHeaders: o.HTTPHeaders, AccessConditions: o.AccessConditions, - CpkInfo: o.CpkInfo, - CpkScopeInfo: o.CpkScopeInfo, + CPKInfo: o.CPKInfo, + CPKScopeInfo: o.CPKScopeInfo, } } @@ -235,41 +307,34 @@ func (o *uploadFromReaderOptions) getCommitBlockListOptions() *CommitBlockListOp Metadata: o.Metadata, Tier: o.AccessTier, HTTPHeaders: o.HTTPHeaders, - CpkInfo: o.CpkInfo, - CpkScopeInfo: o.CpkScopeInfo, + CPKInfo: o.CPKInfo, + CPKScopeInfo: o.CPKScopeInfo, } } // --------------------------------------------------------------------------------------------------------------------- -// UploadStreamOptions provides set of configurations for UploadStream operation +// UploadStreamOptions provides set of configurations for UploadStream operation. type UploadStreamOptions struct { - // transferManager provides a transferManager that controls buffer allocation/reuse and - // concurrency. This overrides BlockSize and MaxConcurrency if set. - transferManager shared.TransferManager - transferMangerNotSet bool - - // BlockSize defines the size of the buffer used during upload. The default and mimimum value is 1 MiB. - BlockSize int + // BlockSize defines the size of the buffer used during upload. The default and minimum value is 1 MiB. + BlockSize int64 - // Concurrency defines the number of concurrent uploads to be performed to upload the file. + // Concurrency defines the max number of concurrent uploads to be performed to upload the file. // Each concurrent upload will create a buffer of size BlockSize. The default value is one. 
Concurrency int + TransactionalValidation blob.TransferValidationType + HTTPHeaders *blob.HTTPHeaders - Metadata map[string]string + Metadata map[string]*string AccessConditions *blob.AccessConditions AccessTier *blob.AccessTier Tags map[string]string - CpkInfo *blob.CpkInfo - CpkScopeInfo *blob.CpkScopeInfo + CPKInfo *blob.CPKInfo + CPKScopeInfo *blob.CPKScopeInfo } -func (u *UploadStreamOptions) format() error { - if u == nil || u.transferManager != nil { - return nil - } - +func (u *UploadStreamOptions) setDefaults() { if u.Concurrency == 0 { u.Concurrency = 1 } @@ -277,35 +342,70 @@ func (u *UploadStreamOptions) format() error { if u.BlockSize < _1MiB { u.BlockSize = _1MiB } - - var err error - u.transferManager, err = shared.NewStaticBuffer(u.BlockSize, u.Concurrency) - if err != nil { - return fmt.Errorf("bug: default transfer manager could not be created: %s", err) - } - u.transferMangerNotSet = true - return nil } func (u *UploadStreamOptions) getStageBlockOptions() *StageBlockOptions { + if u == nil { + return nil + } + leaseAccessConditions, _ := exported.FormatBlobAccessConditions(u.AccessConditions) return &StageBlockOptions{ - CpkInfo: u.CpkInfo, - CpkScopeInfo: u.CpkScopeInfo, - LeaseAccessConditions: leaseAccessConditions, + TransactionalValidation: u.TransactionalValidation, + CPKInfo: u.CPKInfo, + CPKScopeInfo: u.CPKScopeInfo, + LeaseAccessConditions: leaseAccessConditions, } } func (u *UploadStreamOptions) getCommitBlockListOptions() *CommitBlockListOptions { - options := &CommitBlockListOptions{ + if u == nil { + return nil + } + + return &CommitBlockListOptions{ Tags: u.Tags, Metadata: u.Metadata, Tier: u.AccessTier, HTTPHeaders: u.HTTPHeaders, - CpkInfo: u.CpkInfo, - CpkScopeInfo: u.CpkScopeInfo, + CPKInfo: u.CPKInfo, + CPKScopeInfo: u.CPKScopeInfo, AccessConditions: u.AccessConditions, } +} + +func (u *UploadStreamOptions) getUploadOptions() *UploadOptions { + if u == nil { + return nil + } - return options + return &UploadOptions{ + Tags: u.Tags, + Metadata: u.Metadata, + Tier: u.AccessTier, + HTTPHeaders: u.HTTPHeaders, + CPKInfo: u.CPKInfo, + CPKScopeInfo: u.CPKScopeInfo, + AccessConditions: u.AccessConditions, + } } + +// --------------------------------------------------------------------------------------------------------------------- + +// ExpiryType defines values for ExpiryType. +type ExpiryType = exported.ExpiryType + +// ExpiryTypeAbsolute defines the absolute time for the blob expiry. +type ExpiryTypeAbsolute = exported.ExpiryTypeAbsolute + +// ExpiryTypeRelativeToNow defines the duration relative to now for the blob expiry. +type ExpiryTypeRelativeToNow = exported.ExpiryTypeRelativeToNow + +// ExpiryTypeRelativeToCreation defines the duration relative to creation for the blob expiry. +type ExpiryTypeRelativeToCreation = exported.ExpiryTypeRelativeToCreation + +// ExpiryTypeNever defines that the blob will be set to never expire. +type ExpiryTypeNever = exported.ExpiryTypeNever + +// SetExpiryOptions contains the optional parameters for the Client.SetExpiry method. 
+type SetExpiryOptions = exported.SetExpiryOptions diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/responses.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/responses.go index dab1873b1a9d7..917f718097792 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/responses.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/responses.go @@ -16,6 +16,9 @@ import ( // UploadResponse contains the response from method Client.Upload. type UploadResponse = generated.BlockBlobClientUploadResponse +// UploadBlobFromURLResponse contains the response from the method Client.UploadBlobFromURL +type UploadBlobFromURLResponse = generated.BlockBlobClientPutBlobFromURLResponse + // StageBlockResponse contains the response from method Client.StageBlock. type StageBlockResponse = generated.BlockBlobClientStageBlockResponse @@ -97,7 +100,7 @@ func toUploadReaderAtResponseFromCommitBlockListResponse(resp CommitBlockListRes RequestID: resp.RequestID, Version: resp.Version, VersionID: resp.VersionID, - ContentCRC64: resp.XMSContentCRC64, + ContentCRC64: resp.ContentCRC64, } } @@ -109,3 +112,6 @@ type UploadBufferResponse = uploadFromReaderResponse // UploadStreamResponse contains the response from method Client.CommitBlockList. type UploadStreamResponse = CommitBlockListResponse + +// SetExpiryResponse contains the response from method Client.SetExpiry. +type SetExpiryResponse = generated.BlobClientSetExpiryResponse diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/ci.yml index e0623f50e8554..03035033891f1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/ci.yml +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/ci.yml @@ -26,3 +26,9 @@ stages: parameters: ServiceDirectory: 'storage/azblob' RunLiveTests: true + UsePipelineProxy: false + EnvVars: + AZURE_CLIENT_ID: $(AZBLOB_CLIENT_ID) + AZURE_TENANT_ID: $(AZBLOB_TENANT_ID) + AZURE_CLIENT_SECRET: $(AZBLOB_CLIENT_SECRET) + AZURE_SUBSCRIPTION_ID: $(AZBLOB_SUBSCRIPTION_ID) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/client.go index 4cc9a51bb402e..c511d8a79f208 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/client.go @@ -13,14 +13,13 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service" ) // ClientOptions contains the optional parameters when creating a Client. -type ClientOptions struct { - azcore.ClientOptions -} +type ClientOptions base.ClientOptions // Client represents a URL to an Azure Storage blob; the blob may be a block blob, append blob, or page blob. 
type Client struct { @@ -32,11 +31,7 @@ type Client struct { // - cred - an Azure AD credential, typically obtained via the azidentity module // - options - client options; pass nil to accept the default values func NewClient(serviceURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { - var clientOptions *service.ClientOptions - if options != nil { - clientOptions = &service.ClientOptions{ClientOptions: options.ClientOptions} - } - svcClient, err := service.NewClient(serviceURL, cred, clientOptions) + svcClient, err := service.NewClient(serviceURL, cred, (*service.ClientOptions)(options)) if err != nil { return nil, err } @@ -51,11 +46,7 @@ func NewClient(serviceURL string, cred azcore.TokenCredential, options *ClientOp // - serviceURL - the URL of the storage account e.g. https://.blob.core.windows.net/? // - options - client options; pass nil to accept the default values func NewClientWithNoCredential(serviceURL string, options *ClientOptions) (*Client, error) { - var clientOptions *service.ClientOptions - if options != nil { - clientOptions = &service.ClientOptions{ClientOptions: options.ClientOptions} - } - svcClient, err := service.NewClientWithNoCredential(serviceURL, clientOptions) + svcClient, err := service.NewClientWithNoCredential(serviceURL, (*service.ClientOptions)(options)) if err != nil { return nil, err } @@ -84,15 +75,12 @@ func NewClientWithSharedKeyCredential(serviceURL string, cred *SharedKeyCredenti // - connectionString - a connection string for the desired storage account // - options - client options; pass nil to accept the default values func NewClientFromConnectionString(connectionString string, options *ClientOptions) (*Client, error) { - if options == nil { - options = &ClientOptions{} - } - containerClient, err := service.NewClientFromConnectionString(connectionString, (*service.ClientOptions)(options)) + svcClient, err := service.NewClientFromConnectionString(connectionString, (*service.ClientOptions)(options)) if err != nil { return nil, err } return &Client{ - svc: containerClient, + svc: svcClient, }, nil } @@ -101,6 +89,11 @@ func (c *Client) URL() string { return c.svc.URL() } +// ServiceClient returns the embedded service client for this client. +func (c *Client) ServiceClient() *service.Client { + return c.svc +} + // CreateContainer is a lifecycle method to creates a new container under the specified account. // If the container with the same name already exists, a ResourceExistsError will be raised. // This method returns a client with which to interact with the newly created container. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/common.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/common.go index 560e151d553ff..48771e8c9c270 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/common.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/common.go @@ -32,5 +32,5 @@ func ParseURL(u string) (URLParts, error) { // HTTPRange defines a range of bytes within an HTTP resource, starting at offset and // ending at offset+count. A zero-value HTTPRange indicates the entire resource. An HTTPRange -// which has an offset but no zero value count indicates from the offset to the resource's end. +// which has an offset and zero value count indicates from the offset to the resource's end. 
type HTTPRange = exported.HTTPRange diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/constants.go index d4c3262d932d6..c42fcdec7f91d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/constants.go @@ -10,7 +10,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" ) -// PublicAccessType defines values for AccessType - private (default) or blob or container +// PublicAccessType defines values for AccessType - private (default) or blob or container. type PublicAccessType = generated.PublicAccessType const ( @@ -23,7 +23,7 @@ func PossiblePublicAccessTypeValues() []PublicAccessType { return generated.PossiblePublicAccessTypeValues() } -// DeleteSnapshotsOptionType defines values for DeleteSnapshotsOptionType +// DeleteSnapshotsOptionType defines values for DeleteSnapshotsOptionType. type DeleteSnapshotsOptionType = generated.DeleteSnapshotsOptionType const ( diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/batch_builder.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/batch_builder.go new file mode 100644 index 0000000000000..83edea72b2f38 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/batch_builder.go @@ -0,0 +1,94 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package container + +import ( + "context" + "fmt" + "net/url" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" +) + +// BatchBuilder is used for creating the batch operations list. It contains the list of either delete or set tier sub-requests. +// NOTE: All sub-requests in the batch must be of the same type, either delete or set tier. +type BatchBuilder struct { + endpoint string + authPolicy policy.Policy + subRequests []*policy.Request + operationType *exported.BlobBatchOperationType +} + +func (bb *BatchBuilder) checkOperationType(operationType exported.BlobBatchOperationType) error { + if bb.operationType == nil { + bb.operationType = &operationType + return nil + } + if *bb.operationType != operationType { + return fmt.Errorf("BlobBatch only supports one operation type per batch and is already being used for %s operations", *bb.operationType) + } + return nil +} + +// Delete operation is used to add delete sub-request to the batch builder. 
+func (bb *BatchBuilder) Delete(blobName string, options *BatchDeleteOptions) error { + err := bb.checkOperationType(exported.BatchDeleteOperationType) + if err != nil { + return err + } + + blobName = url.PathEscape(blobName) + blobURL := runtime.JoinPaths(bb.endpoint, blobName) + + blobClient, err := blob.NewClientWithNoCredential(blobURL, nil) + if err != nil { + return err + } + + deleteOptions, leaseInfo, accessConditions := options.format() + req, err := getGeneratedBlobClient(blobClient).DeleteCreateRequest(context.TODO(), deleteOptions, leaseInfo, accessConditions) + if err != nil { + return err + } + + // remove x-ms-version header + exported.UpdateSubRequestHeaders(req) + + bb.subRequests = append(bb.subRequests, req) + return nil +} + +// SetTier operation is used to add set tier sub-request to the batch builder. +func (bb *BatchBuilder) SetTier(blobName string, accessTier blob.AccessTier, options *BatchSetTierOptions) error { + err := bb.checkOperationType(exported.BatchSetTierOperationType) + if err != nil { + return err + } + + blobName = url.PathEscape(blobName) + blobURL := runtime.JoinPaths(bb.endpoint, blobName) + + blobClient, err := blob.NewClientWithNoCredential(blobURL, nil) + if err != nil { + return err + } + + setTierOptions, leaseInfo, accessConditions := options.format() + req, err := getGeneratedBlobClient(blobClient).SetTierCreateRequest(context.TODO(), accessTier, setTierOptions, leaseInfo, accessConditions) + if err != nil { + return err + } + + // remove x-ms-version header + exported.UpdateSubRequestHeaders(req) + + bb.subRequests = append(bb.subRequests, req) + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/client.go index e4afad9f84f10..b23798cb1f66f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/client.go @@ -7,9 +7,14 @@ package container import ( + "bytes" "context" "errors" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" "net/http" + "net/url" "time" "github.com/Azure/azure-sdk-for-go/sdk/azcore" @@ -27,9 +32,7 @@ import ( ) // ClientOptions contains the optional parameters when creating a Client. -type ClientOptions struct { - azcore.ClientOptions -} +type ClientOptions base.ClientOptions // Client represents a URL to the Azure Storage container allowing you to manipulate its blobs. 
type Client base.Client[generated.ContainerClient] @@ -39,12 +42,16 @@ type Client base.Client[generated.ContainerClient] // - cred - an Azure AD credential, typically obtained via the azidentity module // - options - client options; pass nil to accept the default values func NewClient(containerURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { - authPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil) + audience := base.GetAudience((*base.ClientOptions)(options)) + authPolicy := shared.NewStorageChallengePolicy(cred, audience) conOptions := shared.GetClientOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - return (*Client)(base.NewContainerClient(containerURL, pl, nil)), nil + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + return (*Client)(base.NewContainerClient(containerURL, azClient, &cred, (*base.ClientOptions)(conOptions))), nil } // NewClientWithNoCredential creates an instance of Client with the specified values. @@ -53,9 +60,12 @@ func NewClient(containerURL string, cred azcore.TokenCredential, options *Client // - options - client options; pass nil to accept the default values func NewClientWithNoCredential(containerURL string, options *ClientOptions) (*Client, error) { conOptions := shared.GetClientOptions(options) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) - return (*Client)(base.NewContainerClient(containerURL, pl, nil)), nil + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + return (*Client)(base.NewContainerClient(containerURL, azClient, nil, (*base.ClientOptions)(conOptions))), nil } // NewClientWithSharedKeyCredential creates an instance of Client with the specified values. @@ -65,10 +75,13 @@ func NewClientWithNoCredential(containerURL string, options *ClientOptions) (*Cl func NewClientWithSharedKeyCredential(containerURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) { authPolicy := exported.NewSharedKeyCredPolicy(cred) conOptions := shared.GetClientOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - return (*Client)(base.NewContainerClient(containerURL, pl, cred)), nil + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + return (*Client)(base.NewContainerClient(containerURL, azClient, cred, (*base.ClientOptions)(conOptions))), nil } // NewClientFromConnectionString creates an instance of Client with the specified values. 
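For reference, a minimal sketch of how the reworked container constructors above are typically consumed from application code. This snippet is not part of the patch; the account name, container name, and SAS token are placeholders, and the azidentity credential is assumed purely for illustration.

	package main

	import (
		"fmt"
		"log"

		"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
		"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
	)

	func main() {
		// Azure AD path: per the diff above, NewClient now builds its pipeline via
		// azcore.NewClient and attaches the storage challenge policy for the
		// audience resolved from base.ClientOptions.
		cred, err := azidentity.NewDefaultAzureCredential(nil)
		if err != nil {
			log.Fatal(err)
		}
		client, err := container.NewClient("https://myaccount.blob.core.windows.net/mycontainer", cred, nil)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(client.URL())

		// SAS / anonymous path: no credential policy is added to the pipeline.
		sasClient, err := container.NewClientWithNoCredential("https://myaccount.blob.core.windows.net/mycontainer?<sas-token>", nil)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(sasClient.URL())
	}
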
@@ -101,61 +114,71 @@ func (c *Client) sharedKey() *SharedKeyCredential { return base.SharedKey((*base.Client[generated.ContainerClient])(c)) } +func (c *Client) credential() any { + return base.Credential((*base.Client[generated.ContainerClient])(c)) +} + +// helper method to return the generated.BlobClient which is used for creating the sub-requests +func getGeneratedBlobClient(b *blob.Client) *generated.BlobClient { + return base.InnerClient((*base.Client[generated.BlobClient])(b)) +} + +func (c *Client) getClientOptions() *base.ClientOptions { + return base.GetClientOptions((*base.Client[generated.ContainerClient])(c)) +} + // URL returns the URL endpoint used by the Client object. func (c *Client) URL() string { return c.generated().Endpoint() } -// NewBlobClient creates a new BlobClient object by concatenating blobName to the end of -// Client's URL. The new BlobClient uses the same request policy pipeline as the Client. -// To change the pipeline, create the BlobClient and then call its WithPipeline method passing in the -// desired pipeline object. Or, call this package's NewBlobClient instead of calling this object's -// NewBlobClient method. +// NewBlobClient creates a new blob.Client object by concatenating blobName to the end of +// Client's URL. The blob name will be URL-encoded. +// The new blob.Client uses the same request policy pipeline as this Client. func (c *Client) NewBlobClient(blobName string) *blob.Client { + blobName = url.PathEscape(blobName) blobURL := runtime.JoinPaths(c.URL(), blobName) - return (*blob.Client)(base.NewBlobClient(blobURL, c.generated().Pipeline(), c.sharedKey())) + return (*blob.Client)(base.NewBlobClient(blobURL, c.generated().InternalClient().WithClientName(exported.ModuleName), c.credential(), c.getClientOptions())) } -// NewAppendBlobClient creates a new AppendBlobURL object by concatenating blobName to the end of -// Client's URL. The new AppendBlobURL uses the same request policy pipeline as the Client. -// To change the pipeline, create the AppendBlobURL and then call its WithPipeline method passing in the -// desired pipeline object. Or, call this package's NewAppendBlobClient instead of calling this object's -// NewAppendBlobClient method. +// NewAppendBlobClient creates a new appendblob.Client object by concatenating blobName to the end of +// this Client's URL. The blob name will be URL-encoded. +// The new appendblob.Client uses the same request policy pipeline as this Client. func (c *Client) NewAppendBlobClient(blobName string) *appendblob.Client { + blobName = url.PathEscape(blobName) blobURL := runtime.JoinPaths(c.URL(), blobName) - return (*appendblob.Client)(base.NewAppendBlobClient(blobURL, c.generated().Pipeline(), c.sharedKey())) + return (*appendblob.Client)(base.NewAppendBlobClient(blobURL, c.generated().InternalClient().WithClientName(exported.ModuleName), c.sharedKey())) } -// NewBlockBlobClient creates a new BlockBlobClient object by concatenating blobName to the end of -// Client's URL. The new BlockBlobClient uses the same request policy pipeline as the Client. -// To change the pipeline, create the BlockBlobClient and then call its WithPipeline method passing in the -// desired pipeline object. Or, call this package's NewBlockBlobClient instead of calling this object's -// NewBlockBlobClient method. +// NewBlockBlobClient creates a new blockblob.Client object by concatenating blobName to the end of +// this Client's URL. The blob name will be URL-encoded. 
+// The new blockblob.Client uses the same request policy pipeline as this Client. func (c *Client) NewBlockBlobClient(blobName string) *blockblob.Client { + blobName = url.PathEscape(blobName) blobURL := runtime.JoinPaths(c.URL(), blobName) - return (*blockblob.Client)(base.NewBlockBlobClient(blobURL, c.generated().Pipeline(), c.sharedKey())) + return (*blockblob.Client)(base.NewBlockBlobClient(blobURL, c.generated().InternalClient().WithClientName(exported.ModuleName), c.sharedKey())) } -// NewPageBlobClient creates a new PageBlobURL object by concatenating blobName to the end of Client's URL. The new PageBlobURL uses the same request policy pipeline as the Client. -// To change the pipeline, create the PageBlobURL and then call its WithPipeline method passing in the -// desired pipeline object. Or, call this package's NewPageBlobClient instead of calling this object's -// NewPageBlobClient method. +// NewPageBlobClient creates a new pageblob.Client object by concatenating blobName to the end of +// this Client's URL. The blob name will be URL-encoded. +// The new pageblob.Client uses the same request policy pipeline as this Client. func (c *Client) NewPageBlobClient(blobName string) *pageblob.Client { + blobName = url.PathEscape(blobName) blobURL := runtime.JoinPaths(c.URL(), blobName) - return (*pageblob.Client)(base.NewPageBlobClient(blobURL, c.generated().Pipeline(), c.sharedKey())) + return (*pageblob.Client)(base.NewPageBlobClient(blobURL, c.generated().InternalClient().WithClientName(exported.ModuleName), c.sharedKey())) } // Create creates a new container within a storage account. If a container with the same name already exists, the operation fails. // For more information, see https://docs.microsoft.com/rest/api/storageservices/create-container. func (c *Client) Create(ctx context.Context, options *CreateOptions) (CreateResponse, error) { var opts *generated.ContainerClientCreateOptions - var cpkScopes *generated.ContainerCpkScopeInfo + var cpkScopes *generated.ContainerCPKScopeInfo if options != nil { opts = &generated.ContainerClientCreateOptions{ Access: options.Access, Metadata: options.Metadata, } - cpkScopes = options.CpkScopeInfo + cpkScopes = options.CPKScopeInfo } resp, err := c.generated().Create(ctx, opts, cpkScopes) @@ -192,7 +215,7 @@ func (c *Client) Restore(ctx context.Context, deletedContainerVersion string, op // For more information, see https://docs.microsoft.com/rest/api/storageservices/get-container-metadata. func (c *Client) GetProperties(ctx context.Context, o *GetPropertiesOptions) (GetPropertiesResponse, error) { // NOTE: GetMetadata actually calls GetProperties internally because GetProperties returns the metadata AND the properties. - // This allows us to not expose a GetProperties method at all simplifying the API. + // This allows us to not expose a GetMetadata method at all simplifying the API. // The optionals are nil, like they were in track 1.5 opts, leaseAccessConditions := o.format() @@ -219,9 +242,20 @@ func (c *Client) GetAccessPolicy(ctx context.Context, o *GetAccessPolicyOptions) // SetAccessPolicy sets the container's permissions. The access policy indicates whether blobs in a container may be accessed publicly. // For more information, see https://docs.microsoft.com/rest/api/storageservices/set-container-acl. 
-func (c *Client) SetAccessPolicy(ctx context.Context, containerACL []*SignedIdentifier, o *SetAccessPolicyOptions) (SetAccessPolicyResponse, error) { - accessPolicy, mac, lac := o.format() - resp, err := c.generated().SetAccessPolicy(ctx, containerACL, accessPolicy, mac, lac) +func (c *Client) SetAccessPolicy(ctx context.Context, o *SetAccessPolicyOptions) (SetAccessPolicyResponse, error) { + accessPolicy, mac, lac, acl, err := o.format() + if err != nil { + return SetAccessPolicyResponse{}, err + } + resp, err := c.generated().SetAccessPolicy(ctx, acl, accessPolicy, mac, lac) + return resp, err +} + +// GetAccountInfo provides account level information +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/get-account-information?tabs=shared-access-signatures. +func (c *Client) GetAccountInfo(ctx context.Context, o *GetAccountInfoOptions) (GetAccountInfoResponse, error) { + getAccountInfoOptions := o.format() + resp, err := c.generated().GetAccountInfo(ctx, getAccountInfoOptions) return resp, err } @@ -252,7 +286,7 @@ func (c *Client) NewListBlobsFlatPager(o *ListBlobsFlatOptions) *runtime.Pager[L if err != nil { return ListBlobsFlatResponse{}, err } - resp, err := c.generated().Pipeline().Do(req) + resp, err := c.generated().InternalClient().Pipeline().Do(req) if err != nil { return ListBlobsFlatResponse{}, err } @@ -267,12 +301,9 @@ func (c *Client) NewListBlobsFlatPager(o *ListBlobsFlatOptions) *runtime.Pager[L // NewListBlobsHierarchyPager returns a channel of blobs starting from the specified Marker. Use an empty // Marker to start enumeration from the beginning. Blob names are returned in lexicographic order. -// After getting a segment, process it, and then call ListBlobsHierarchicalSegment again (passing the the +// After getting a segment, process it, and then call ListBlobsHierarchicalSegment again (passing the // previously-returned Marker) to get the next segment. // For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs. -// AutoPagerTimeout specifies the amount of time with no read operations before the channel times out and closes. Specify no time and it will be ignored. -// AutoPagerBufferSize specifies the channel's buffer size. -// Both the blob item channel and error channel should be watched. Only one error will be released via this channel (or a nil error, to register a clean exit.) func (c *Client) NewListBlobsHierarchyPager(delimiter string, o *ListBlobsHierarchyOptions) *runtime.Pager[ListBlobsHierarchyResponse] { listOptions := o.format() return runtime.NewPager(runtime.PagingHandler[ListBlobsHierarchyResponse]{ @@ -291,7 +322,7 @@ func (c *Client) NewListBlobsHierarchyPager(delimiter string, o *ListBlobsHierar if err != nil { return ListBlobsHierarchyResponse{}, err } - resp, err := c.generated().Pipeline().Do(req) + resp, err := c.generated().InternalClient().Pipeline().Do(req) if err != nil { return ListBlobsHierarchyResponse{}, err } @@ -305,23 +336,22 @@ func (c *Client) NewListBlobsHierarchyPager(delimiter string, o *ListBlobsHierar // GetSASURL is a convenience method for generating a SAS token for the currently pointed at container. // It can only be used if the credential supplied during creation was a SharedKeyCredential. 
-func (c *Client) GetSASURL(permissions sas.ContainerPermissions, start time.Time, expiry time.Time) (string, error) { +func (c *Client) GetSASURL(permissions sas.ContainerPermissions, expiry time.Time, o *GetSASURLOptions) (string, error) { if c.sharedKey() == nil { - return "", errors.New("SAS can only be signed with a SharedKeyCredential") + return "", bloberror.MissingSharedKeyCredential } - + st := o.format() urlParts, err := blob.ParseURL(c.URL()) if err != nil { return "", err } - // Containers do not have snapshots, nor versions. qps, err := sas.BlobSignatureValues{ Version: sas.Version, Protocol: sas.ProtocolHTTPS, ContainerName: urlParts.ContainerName, Permissions: permissions.String(), - StartTime: start.UTC(), + StartTime: st, ExpiryTime: expiry.UTC(), }.SignWithSharedKey(c.sharedKey()) if err != nil { @@ -332,3 +362,76 @@ func (c *Client) GetSASURL(permissions sas.ContainerPermissions, start time.Time return endpoint, nil } + +// NewBatchBuilder creates an instance of BatchBuilder using the same auth policy as the client. +// BatchBuilder is used to build the batch consisting of either delete or set tier sub-requests. +// All sub-requests in the batch must be of the same type, either delete or set tier. +func (c *Client) NewBatchBuilder() (*BatchBuilder, error) { + var authPolicy policy.Policy + + switch cred := c.credential().(type) { + case *azcore.TokenCredential: + authPolicy = shared.NewStorageChallengePolicy(*cred, base.GetAudience(c.getClientOptions())) + case *SharedKeyCredential: + authPolicy = exported.NewSharedKeyCredPolicy(cred) + case nil: + // for authentication using SAS + authPolicy = nil + default: + return nil, fmt.Errorf("unrecognised authentication type %T", cred) + } + + return &BatchBuilder{ + endpoint: c.URL(), + authPolicy: authPolicy, + }, nil +} + +// SubmitBatch operation allows multiple API calls to be embedded into a single HTTP request. +// It builds the request body using the BatchBuilder object passed. +// BatchBuilder contains the list of operations to be submitted. It supports up to 256 sub-requests in a single batch. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/blob-batch. +func (c *Client) SubmitBatch(ctx context.Context, bb *BatchBuilder, options *SubmitBatchOptions) (SubmitBatchResponse, error) { + if bb == nil || len(bb.subRequests) == 0 { + return SubmitBatchResponse{}, errors.New("batch builder is empty") + } + + // create the request body + batchReq, batchID, err := exported.CreateBatchRequest(&exported.BlobBatchBuilder{ + AuthPolicy: bb.authPolicy, + SubRequests: bb.subRequests, + }) + if err != nil { + return SubmitBatchResponse{}, err + } + + reader := bytes.NewReader(batchReq) + rsc := streaming.NopCloser(reader) + multipartContentType := "multipart/mixed; boundary=" + batchID + + resp, err := c.generated().SubmitBatch(ctx, int64(len(batchReq)), multipartContentType, rsc, options.format()) + if err != nil { + return SubmitBatchResponse{}, err + } + + batchResponses, err := exported.ParseBlobBatchResponse(resp.Body, resp.ContentType, bb.subRequests) + if err != nil { + return SubmitBatchResponse{}, err + } + + return SubmitBatchResponse{ + Responses: batchResponses, + ContentType: resp.ContentType, + RequestID: resp.RequestID, + Version: resp.Version, + }, nil +} + +// FilterBlobs operation finds all blobs in the container whose tags match a given search expression. +// https://docs.microsoft.com/en-us/rest/api/storageservices/find-blobs-by-tags-container +// eg. 
"dog='germanshepherd' and penguin='emperorpenguin'" +func (c *Client) FilterBlobs(ctx context.Context, where string, o *FilterBlobsOptions) (FilterBlobsResponse, error) { + containerClientFilterBlobsOptions := o.format() + resp, err := c.generated().FilterBlobs(ctx, where, containerClientFilterBlobsOptions) + return resp, err +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/constants.go index ce6e67b5ed4af..09a8e8ed3f2b1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/constants.go @@ -8,7 +8,33 @@ package container import "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" -// PublicAccessType defines values for AccessType - private (default) or blob or container +// AccessTier defines values for blob access tiers. +type AccessTier = generated.AccessTier + +const ( + AccessTierArchive AccessTier = generated.AccessTierArchive + AccessTierCool AccessTier = generated.AccessTierCool + AccessTierHot AccessTier = generated.AccessTierHot + AccessTierP10 AccessTier = generated.AccessTierP10 + AccessTierP15 AccessTier = generated.AccessTierP15 + AccessTierP20 AccessTier = generated.AccessTierP20 + AccessTierP30 AccessTier = generated.AccessTierP30 + AccessTierP4 AccessTier = generated.AccessTierP4 + AccessTierP40 AccessTier = generated.AccessTierP40 + AccessTierP50 AccessTier = generated.AccessTierP50 + AccessTierP6 AccessTier = generated.AccessTierP6 + AccessTierP60 AccessTier = generated.AccessTierP60 + AccessTierP70 AccessTier = generated.AccessTierP70 + AccessTierP80 AccessTier = generated.AccessTierP80 + AccessTierPremium AccessTier = generated.AccessTierPremium +) + +// PossibleAccessTierValues returns the possible values for the AccessTier const type. +func PossibleAccessTierValues() []AccessTier { + return generated.PossibleAccessTierValues() +} + +// PublicAccessType defines values for AccessType - private (default) or blob or container. type PublicAccessType = generated.PublicAccessType const ( @@ -21,7 +47,7 @@ func PossiblePublicAccessTypeValues() []PublicAccessType { return generated.PossiblePublicAccessTypeValues() } -// SKUName defines values for SkuName - LRS, GRS, RAGRS, ZRS, Premium LRS +// SKUName defines values for SkuName - LRS, GRS, RAGRS, ZRS, Premium LRS. type SKUName = generated.SKUName const ( @@ -67,48 +93,6 @@ func PossibleBlobTypeValues() []BlobType { return generated.PossibleBlobTypeValues() } -// LeaseStatusType defines values for LeaseStatusType -type LeaseStatusType = generated.LeaseStatusType - -const ( - LeaseStatusTypeLocked LeaseStatusType = generated.LeaseStatusTypeLocked - LeaseStatusTypeUnlocked LeaseStatusType = generated.LeaseStatusTypeUnlocked -) - -// PossibleLeaseStatusTypeValues returns the possible values for the LeaseStatusType const type. -func PossibleLeaseStatusTypeValues() []LeaseStatusType { - return generated.PossibleLeaseStatusTypeValues() -} - -// LeaseDurationType defines values for LeaseDurationType -type LeaseDurationType = generated.LeaseDurationType - -const ( - LeaseDurationTypeInfinite LeaseDurationType = generated.LeaseDurationTypeInfinite - LeaseDurationTypeFixed LeaseDurationType = generated.LeaseDurationTypeFixed -) - -// PossibleLeaseDurationTypeValues returns the possible values for the LeaseDurationType const type. 
-func PossibleLeaseDurationTypeValues() []LeaseDurationType { - return generated.PossibleLeaseDurationTypeValues() -} - -// LeaseStateType defines values for LeaseStateType -type LeaseStateType = generated.LeaseStateType - -const ( - LeaseStateTypeAvailable LeaseStateType = generated.LeaseStateTypeAvailable - LeaseStateTypeLeased LeaseStateType = generated.LeaseStateTypeLeased - LeaseStateTypeExpired LeaseStateType = generated.LeaseStateTypeExpired - LeaseStateTypeBreaking LeaseStateType = generated.LeaseStateTypeBreaking - LeaseStateTypeBroken LeaseStateType = generated.LeaseStateTypeBroken -) - -// PossibleLeaseStateTypeValues returns the possible values for the LeaseStateType const type. -func PossibleLeaseStateTypeValues() []LeaseStateType { - return generated.PossibleLeaseStateTypeValues() -} - // ArchiveStatus defines values for ArchiveStatus type ArchiveStatus = generated.ArchiveStatus diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/models.go index eb65d5fc3c80e..61d936ab73dbc 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/models.go @@ -7,7 +7,9 @@ package container import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" "reflect" + "time" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" @@ -24,14 +26,29 @@ func NewSharedKeyCredential(accountName, accountKey string) (*SharedKeyCredentia // Request Model Declaration ------------------------------------------------------------------------------------------- -// CpkScopeInfo contains a group of parameters for the ContainerClient.Create method. -type CpkScopeInfo = generated.ContainerCpkScopeInfo +// CPKScopeInfo contains a group of parameters for the ContainerClient.Create method. +type CPKScopeInfo = generated.ContainerCPKScopeInfo -// BlobProperties - Properties of a blob -type BlobProperties = generated.BlobPropertiesInternal +// BlobFlatListSegment - List of BlobItem. +type BlobFlatListSegment = generated.BlobFlatListSegment -// BlobItem - An Azure Storage blob -type BlobItem = generated.BlobItemInternal +// BlobHierarchyListSegment - List of BlobItem and BlobPrefix. +type BlobHierarchyListSegment = generated.BlobHierarchyListSegment + +// BlobProperties - Properties of a blob. +type BlobProperties = generated.BlobProperties + +// BlobItem - An Azure Storage blob. +type BlobItem = generated.BlobItem + +// BlobTags - Blob tags. +type BlobTags = generated.BlobTags + +// BlobPrefix is a blob's prefix when hierarchically listing blobs. +type BlobPrefix = generated.BlobPrefix + +// BlobTag - a key/value pair on a blob. +type BlobTag = generated.BlobTag // AccessConditions identifies container-specific access conditions which you optionally set. type AccessConditions = exported.ContainerAccessConditions @@ -42,28 +59,28 @@ type LeaseAccessConditions = exported.LeaseAccessConditions // ModifiedAccessConditions contains a group of parameters for specifying access conditions. type ModifiedAccessConditions = exported.ModifiedAccessConditions -// AccessPolicy - An Access policy +// AccessPolicy - An Access policy. type AccessPolicy = generated.AccessPolicy // AccessPolicyPermission type simplifies creating the permissions string for a container's access policy. 
// Initialize an instance of this type and then call its String method to set AccessPolicy's Permission field. type AccessPolicyPermission = exported.AccessPolicyPermission -// SignedIdentifier - signed identifier +// SignedIdentifier - signed identifier. type SignedIdentifier = generated.SignedIdentifier // Request Model Declaration ------------------------------------------------------------------------------------------- // CreateOptions contains the optional parameters for the Client.Create method. type CreateOptions struct { - // Specifies whether data in the container may be accessed publicly and the level of access + // Specifies whether data in the container may be accessed publicly and the level of access. Access *PublicAccessType // Optional. Specifies a user-defined name-value pair associated with the blob. - Metadata map[string]string + Metadata map[string]*string // Optional. Specifies the encryption scope settings to set on the container. - CpkScopeInfo *CpkScopeInfo + CPKScopeInfo *CPKScopeInfo } // --------------------------------------------------------------------------------------------------------------------- @@ -163,11 +180,11 @@ type ListBlobsFlatOptions struct { // as the value for the marker parameter in a subsequent call to request the next // page of list items. The marker value is opaque to the client. Marker *string - // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // Specifies the maximum number of containers to return. If the request does not specify MaxResults, or specifies a value // greater than 5000, the server will return up to 5000 items. Note that if the // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder // of the results. For this reason, it is possible that the service will - // return fewer results than specified by maxresults, or than the default of 5000. + // return fewer results than specified by MaxResults, or than the default of 5000. MaxResults *int32 // Filters the results to return only containers whose name begins with the specified prefix. Prefix *string @@ -185,11 +202,11 @@ type ListBlobsHierarchyOptions struct { // as the value for the marker parameter in a subsequent call to request the next // page of list items. The marker value is opaque to the client. Marker *string - // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // Specifies the maximum number of containers to return. If the request does not specify MaxResults, or specifies a value // greater than 5000, the server will return up to 5000 items. Note that if the // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder // of the results. For this reason, it is possible that the service will - // return fewer results than specified by maxresults, or than the default of 5000. + // return fewer results than specified by MaxResults, or than the default of 5000. MaxResults *int32 // Filters the results to return only containers whose name begins with the specified prefix. 
Prefix *string @@ -211,9 +228,30 @@ func (o *ListBlobsHierarchyOptions) format() generated.ContainerClientListBlobHi // --------------------------------------------------------------------------------------------------------------------- +// GetSASURLOptions contains the optional parameters for the Client.GetSASURL method. +type GetSASURLOptions struct { + StartTime *time.Time +} + +func (o *GetSASURLOptions) format() time.Time { + if o == nil { + return time.Time{} + } + + var st time.Time + if o.StartTime != nil { + st = o.StartTime.UTC() + } else { + st = time.Time{} + } + return st +} + +// --------------------------------------------------------------------------------------------------------------------- + // SetMetadataOptions contains the optional parameters for the Client.SetMetadata method. type SetMetadataOptions struct { - Metadata map[string]string + Metadata map[string]*string LeaseAccessConditions *LeaseAccessConditions ModifiedAccessConditions *ModifiedAccessConditions } @@ -243,21 +281,147 @@ func (o *GetAccessPolicyOptions) format() (*generated.ContainerClientGetAccessPo // --------------------------------------------------------------------------------------------------------------------- -// SetAccessPolicyOptions provides set of configurations for ContainerClient.SetAccessPolicy operation +// SetAccessPolicyOptions provides set of configurations for ContainerClient.SetAccessPolicy operation. type SetAccessPolicyOptions struct { - // Specifies whether data in the container may be accessed publicly and the level of access - Access *PublicAccessType - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. + // Specifies whether data in the container may be accessed publicly and the level of access. + // If this header is not included in the request, container data is private to the account owner. 
+ Access *PublicAccessType AccessConditions *AccessConditions + ContainerACL []*SignedIdentifier } -func (o *SetAccessPolicyOptions) format() (*generated.ContainerClientSetAccessPolicyOptions, *LeaseAccessConditions, *ModifiedAccessConditions) { +func (o *SetAccessPolicyOptions) format() (*generated.ContainerClientSetAccessPolicyOptions, *LeaseAccessConditions, *ModifiedAccessConditions, []*SignedIdentifier, error) { if o == nil { - return nil, nil, nil + return nil, nil, nil, nil, nil + } + if o.ContainerACL != nil { + for _, c := range o.ContainerACL { + err := formatTime(c) + if err != nil { + return nil, nil, nil, nil, err + } + } } lac, mac := exported.FormatContainerAccessConditions(o.AccessConditions) return &generated.ContainerClientSetAccessPolicyOptions{ Access: o.Access, - }, lac, mac + }, lac, mac, o.ContainerACL, nil +} + +func formatTime(c *SignedIdentifier) error { + if c.AccessPolicy == nil { + return nil + } + + if c.AccessPolicy.Start != nil { + st, err := time.Parse(time.RFC3339, c.AccessPolicy.Start.UTC().Format(time.RFC3339)) + if err != nil { + return err + } + c.AccessPolicy.Start = &st + } + if c.AccessPolicy.Expiry != nil { + et, err := time.Parse(time.RFC3339, c.AccessPolicy.Expiry.UTC().Format(time.RFC3339)) + if err != nil { + return err + } + c.AccessPolicy.Expiry = &et + } + + return nil +} + +// --------------------------------------------------------------------------------------------------------------------- + +// GetAccountInfoOptions provides set of options for Client.GetAccountInfo +type GetAccountInfoOptions struct { + // placeholder for future options +} + +func (o *GetAccountInfoOptions) format() *generated.ContainerClientGetAccountInfoOptions { + return nil +} + +// --------------------------------------------------------------------------------------------------------------------- + +// BatchDeleteOptions contains the optional parameters for the BatchBuilder.Delete method. +type BatchDeleteOptions struct { + blob.DeleteOptions + VersionID *string + Snapshot *string +} + +func (o *BatchDeleteOptions) format() (*generated.BlobClientDeleteOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil + } + + basics := generated.BlobClientDeleteOptions{ + DeleteSnapshots: o.DeleteSnapshots, + DeleteType: o.BlobDeleteType, // None by default + Snapshot: o.Snapshot, + VersionID: o.VersionID, + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return &basics, leaseAccessConditions, modifiedAccessConditions +} + +// BatchSetTierOptions contains the optional parameters for the BatchBuilder.SetTier method. +type BatchSetTierOptions struct { + blob.SetTierOptions + VersionID *string + Snapshot *string +} + +func (o *BatchSetTierOptions) format() (*generated.BlobClientSetTierOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil + } + + basics := generated.BlobClientSetTierOptions{ + RehydratePriority: o.RehydratePriority, + Snapshot: o.Snapshot, + VersionID: o.VersionID, + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return &basics, leaseAccessConditions, modifiedAccessConditions +} + +// SubmitBatchOptions contains the optional parameters for the Client.SubmitBatch method. 
+type SubmitBatchOptions struct { + // placeholder for future options +} + +func (o *SubmitBatchOptions) format() *generated.ContainerClientSubmitBatchOptions { + return nil +} + +// --------------------------------------------------------------------------------------------------------------------- + +// FilterBlobsOptions provides set of options for Client.FilterBlobs. +type FilterBlobsOptions struct { + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + MaxResults *int32 +} + +func (o *FilterBlobsOptions) format() *generated.ContainerClientFilterBlobsOptions { + if o == nil { + return nil + } + return &generated.ContainerClientFilterBlobsOptions{ + Marker: o.Marker, + Maxresults: o.MaxResults, + } } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/responses.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/responses.go index 9d8672b135bee..9aaefe277fb97 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/responses.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/responses.go @@ -7,6 +7,7 @@ package container import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" ) @@ -25,9 +26,15 @@ type GetPropertiesResponse = generated.ContainerClientGetPropertiesResponse // ListBlobsFlatResponse contains the response from method Client.ListBlobFlatSegment. type ListBlobsFlatResponse = generated.ContainerClientListBlobFlatSegmentResponse +// ListBlobsFlatSegmentResponse - An enumeration of blobs +type ListBlobsFlatSegmentResponse = generated.ListBlobsFlatSegmentResponse + // ListBlobsHierarchyResponse contains the response from method Client.ListBlobHierarchySegment. type ListBlobsHierarchyResponse = generated.ContainerClientListBlobHierarchySegmentResponse +// ListBlobsHierarchySegmentResponse - An enumeration of blobs +type ListBlobsHierarchySegmentResponse = generated.ListBlobsHierarchySegmentResponse + // SetMetadataResponse contains the response from method Client.SetMetadata. type SetMetadataResponse = generated.ContainerClientSetMetadataResponse @@ -36,3 +43,27 @@ type GetAccessPolicyResponse = generated.ContainerClientGetAccessPolicyResponse // SetAccessPolicyResponse contains the response from method Client.SetAccessPolicy. type SetAccessPolicyResponse = generated.ContainerClientSetAccessPolicyResponse + +// GetAccountInfoResponse contains the response from method Client.GetAccountInfo. 
+type GetAccountInfoResponse = generated.ContainerClientGetAccountInfoResponse + +// SubmitBatchResponse contains the response from method Client.SubmitBatch. +type SubmitBatchResponse struct { + // Responses contains the responses of the sub-requests in the batch + Responses []*BatchResponseItem + + // ContentType contains the information returned from the Content-Type header response. + ContentType *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BatchResponseItem contains the response for the individual sub-requests. +type BatchResponseItem = exported.BatchResponseItem + +// FilterBlobsResponse contains the response from method Client.FilterBlobs. +type FilterBlobsResponse = generated.ContainerClientFilterBlobsResponse diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/doc.go index d5b6ed6a70461..9a4806c575774 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/doc.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/doc.go @@ -51,7 +51,7 @@ Use the key as the credential parameter to authenticate the client: cred, err := azblob.NewSharedKeyCredential(accountName, accountKey) handle(err) - serviceClient, err := azblob.NewServiceClientWithSharedKey(serviceURL, cred, nil) + serviceClient, err := azblob.NewClientWithSharedKeyCredential(serviceURL, cred, nil) handle(err) fmt.Println(serviceClient.URL()) @@ -59,11 +59,12 @@ Use the key as the credential parameter to authenticate the client: Using a Connection String Depending on your use case and authorization method, you may prefer to initialize a client instance with a connection string instead of providing the account URL and credential separately. -To do this, pass the connection string to the service client's `NewServiceClientFromConnectionString` method. +To do this, pass the connection string to the service client's `NewClientFromConnectionString` method. The connection string can be found in your storage account in the Azure Portal under the "Access Keys" section. 
connStr := "DefaultEndpointsProtocol=https;AccountName=;AccountKey=;EndpointSuffix=core.windows.net" - serviceClient, err := azblob.NewServiceClientFromConnectionString(connStr, nil) + serviceClient, err := azblob.NewClientFromConnectionString(connStr, nil) + handle(err) Using a Shared Access Signature (SAS) Token @@ -82,20 +83,20 @@ You can generate a SAS token from the Azure Portal under Shared Access Signature cred, err := azblob.NewSharedKeyCredential(accountName, accountKey) handle(err) - serviceClient, err := azblob.NewServiceClientWithSharedKey(serviceURL, cred, nil) + serviceClient, err := azblob.NewClientWithSharedKeyCredential(serviceURL, cred, nil) handle(err) fmt.Println(serviceClient.URL()) // Alternatively, you can create SAS on the fly - resources := azblob.AccountSASResourceTypes{Service: true} - permission := azblob.AccountSASPermissions{Read: true} + resources := sas.AccountResourceTypes{Service: true} + permission := sas.AccountPermissions{Read: true} start := time.Now() expiry := start.AddDate(0, 0, 1) - serviceURLWithSAS, err := serviceClient.GetSASURL(resources, permission, start, expiry) + serviceURLWithSAS, err := serviceClient.ServiceClient().GetSASURL(resources, permission, expiry, &service.GetSASURLOptions{StartTime: &start}) handle(err) - serviceClientWithSAS, err := azblob.NewServiceClientWithNoCredential(serviceURLWithSAS, nil) + serviceClientWithSAS, err := azblob.NewClientWithNoCredential(serviceURLWithSAS, nil) handle(err) fmt.Println(serviceClientWithSAS.URL()) @@ -135,13 +136,13 @@ Examples handle(err) // The service URL for blob endpoints is usually in the form: http(s)://.blob.core.windows.net/ - serviceClient, err := azblob.NewServiceClientWithSharedKey(fmt.Sprintf("https://%s.blob.core.windows.net/", accountName), cred, nil) + serviceClient, err := azblob.NewClientWithSharedKeyCredential(fmt.Sprintf("https://%s.blob.core.windows.net/", accountName), cred, nil) handle(err) // ===== 1. Create a container ===== // First, create a container client, and use the Create method to create a new container in your account - containerClient, err := serviceClient.NewContainerClient("testcontainer") + containerClient := serviceClient.ServiceClient().NewContainerClient("testcontainer") handle(err) // All APIs have an options' bag struct as a parameter. @@ -154,13 +155,13 @@ Examples uploadData := "Hello world!" // Create a new blockBlobClient from the containerClient - blockBlobClient, err := containerClient.NewBlockBlobClient("HelloWorld.txt") + blockBlobClient := containerClient.NewBlockBlobClient("HelloWorld.txt") handle(err) // Upload data to the block blob - blockBlobUploadOptions := azblob.BlockBlobUploadOptions{ - Metadata: map[string]string{"Foo": "Bar"}, - TagsMap: map[string]string{"Year": "2022"}, + blockBlobUploadOptions := blockblob.UploadOptions{ + Metadata: map[string]*string{"Foo": to.Ptr("Bar")}, + Tags: map[string]string{"Year": "2022"}, } _, err = blockBlobClient.Upload(context.TODO(), streaming.NopCloser(strings.NewReader(uploadData)), &blockBlobUploadOptions) handle(err) @@ -175,10 +176,9 @@ Examples downloadData, err := io.ReadAll(reader) handle(err) if string(downloadData) != uploadData { - handle(errors.New("Uploaded data should be same as downloaded data")) + handle(errors.New("uploaded data should be same as downloaded data")) } - if err = reader.Close(); err != nil { handle(err) return @@ -189,18 +189,15 @@ Examples // To iterate over a page use the NextPage(context.Context) to fetch the next page of results. 
// PageResponse() can be used to iterate over the results of the specific page. // Always check the Err() method after paging to see if an error was returned by the pager. A pager will return either an error or the page of results. - pager := containerClient.ListBlobsFlat(nil) - for pager.NextPage(context.TODO()) { - resp := pager.PageResponse() + pager := containerClient.NewListBlobsFlatPager(nil) + for pager.More() { + resp, err := pager.NextPage(context.TODO()) + handle(err) for _, v := range resp.Segment.BlobItems { fmt.Println(*v.Name) } } - if err = pager.Err(); err != nil { - handle(err) - } - // Delete the blob. _, err = blockBlobClient.Delete(context.TODO(), nil) handle(err) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base/clients.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base/clients.go index 16e6cac066dbd..073de855b6170 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base/clients.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base/clients.go @@ -7,14 +7,27 @@ package base import ( - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" + "strings" ) +// ClientOptions contains the optional parameters when creating a Client. +type ClientOptions struct { + azcore.ClientOptions + + // Audience to use when requesting tokens for Azure Active Directory authentication. + // Only has an effect when credential is of type TokenCredential. The value could be + // https://storage.azure.com/ (default) or https://.blob.core.windows.net. 
+ Audience string +} + type Client[T any] struct { - inner *T - sharedKey *exported.SharedKeyCredential + inner *T + credential any + options *ClientOptions } func InnerClient[T any](client *Client[T]) *T { @@ -22,31 +35,55 @@ func InnerClient[T any](client *Client[T]) *T { } func SharedKey[T any](client *Client[T]) *exported.SharedKeyCredential { - return client.sharedKey + switch cred := client.credential.(type) { + case *exported.SharedKeyCredential: + return cred + default: + return nil + } +} + +func Credential[T any](client *Client[T]) any { + return client.credential +} + +func GetClientOptions[T any](client *Client[T]) *ClientOptions { + return client.options +} + +func GetAudience(clOpts *ClientOptions) string { + if clOpts == nil || len(strings.TrimSpace(clOpts.Audience)) == 0 { + return shared.TokenScope + } else { + return strings.TrimRight(clOpts.Audience, "/") + "/.default" + } } func NewClient[T any](inner *T) *Client[T] { return &Client[T]{inner: inner} } -func NewServiceClient(containerURL string, pipeline runtime.Pipeline, sharedKey *exported.SharedKeyCredential) *Client[generated.ServiceClient] { +func NewServiceClient(containerURL string, azClient *azcore.Client, credential any, options *ClientOptions) *Client[generated.ServiceClient] { return &Client[generated.ServiceClient]{ - inner: generated.NewServiceClient(containerURL, pipeline), - sharedKey: sharedKey, + inner: generated.NewServiceClient(containerURL, azClient), + credential: credential, + options: options, } } -func NewContainerClient(containerURL string, pipeline runtime.Pipeline, sharedKey *exported.SharedKeyCredential) *Client[generated.ContainerClient] { +func NewContainerClient(containerURL string, azClient *azcore.Client, credential any, options *ClientOptions) *Client[generated.ContainerClient] { return &Client[generated.ContainerClient]{ - inner: generated.NewContainerClient(containerURL, pipeline), - sharedKey: sharedKey, + inner: generated.NewContainerClient(containerURL, azClient), + credential: credential, + options: options, } } -func NewBlobClient(blobURL string, pipeline runtime.Pipeline, sharedKey *exported.SharedKeyCredential) *Client[generated.BlobClient] { +func NewBlobClient(blobURL string, azClient *azcore.Client, credential any, options *ClientOptions) *Client[generated.BlobClient] { return &Client[generated.BlobClient]{ - inner: generated.NewBlobClient(blobURL, pipeline), - sharedKey: sharedKey, + inner: generated.NewBlobClient(blobURL, azClient), + credential: credential, + options: options, } } @@ -57,29 +94,32 @@ type CompositeClient[T, U any] struct { } func InnerClients[T, U any](client *CompositeClient[T, U]) (*Client[T], *U) { - return &Client[T]{inner: client.innerT}, client.innerU + return &Client[T]{ + inner: client.innerT, + credential: client.sharedKey, + }, client.innerU } -func NewAppendBlobClient(blobURL string, pipeline runtime.Pipeline, sharedKey *exported.SharedKeyCredential) *CompositeClient[generated.BlobClient, generated.AppendBlobClient] { +func NewAppendBlobClient(blobURL string, azClient *azcore.Client, sharedKey *exported.SharedKeyCredential) *CompositeClient[generated.BlobClient, generated.AppendBlobClient] { return &CompositeClient[generated.BlobClient, generated.AppendBlobClient]{ - innerT: generated.NewBlobClient(blobURL, pipeline), - innerU: generated.NewAppendBlobClient(blobURL, pipeline), + innerT: generated.NewBlobClient(blobURL, azClient), + innerU: generated.NewAppendBlobClient(blobURL, azClient), sharedKey: sharedKey, } } -func 
NewBlockBlobClient(blobURL string, pipeline runtime.Pipeline, sharedKey *exported.SharedKeyCredential) *CompositeClient[generated.BlobClient, generated.BlockBlobClient] { +func NewBlockBlobClient(blobURL string, azClient *azcore.Client, sharedKey *exported.SharedKeyCredential) *CompositeClient[generated.BlobClient, generated.BlockBlobClient] { return &CompositeClient[generated.BlobClient, generated.BlockBlobClient]{ - innerT: generated.NewBlobClient(blobURL, pipeline), - innerU: generated.NewBlockBlobClient(blobURL, pipeline), + innerT: generated.NewBlobClient(blobURL, azClient), + innerU: generated.NewBlockBlobClient(blobURL, azClient), sharedKey: sharedKey, } } -func NewPageBlobClient(blobURL string, pipeline runtime.Pipeline, sharedKey *exported.SharedKeyCredential) *CompositeClient[generated.BlobClient, generated.PageBlobClient] { +func NewPageBlobClient(blobURL string, azClient *azcore.Client, sharedKey *exported.SharedKeyCredential) *CompositeClient[generated.BlobClient, generated.PageBlobClient] { return &CompositeClient[generated.BlobClient, generated.PageBlobClient]{ - innerT: generated.NewBlobClient(blobURL, pipeline), - innerU: generated.NewPageBlobClient(blobURL, pipeline), + innerT: generated.NewBlobClient(blobURL, azClient), + innerU: generated.NewPageBlobClient(blobURL, azClient), sharedKey: sharedKey, } } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/blob_batch.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/blob_batch.go new file mode 100644 index 0000000000000..c26c62aa883f7 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/blob_batch.go @@ -0,0 +1,280 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package exported + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "mime" + "mime/multipart" + "net/http" + "net/textproto" + "strconv" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" +) + +const ( + batchIdPrefix = "batch_" + httpVersion = "HTTP/1.1" + httpNewline = "\r\n" +) + +// createBatchID is used for creating a new batch id which is used as batch boundary in the request body +func createBatchID() (string, error) { + batchID, err := uuid.New() + if err != nil { + return "", err + } + + return batchIdPrefix + batchID.String(), nil +} + +// buildSubRequest is used for building the sub-request. Example: +// DELETE /container0/blob0 HTTP/1.1 +// x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT +// Authorization: SharedKey account: +// Content-Length: 0 +func buildSubRequest(req *policy.Request) []byte { + var batchSubRequest strings.Builder + blobPath := req.Raw().URL.EscapedPath() + if len(req.Raw().URL.RawQuery) > 0 { + blobPath += "?" 
+ req.Raw().URL.RawQuery + } + + batchSubRequest.WriteString(fmt.Sprintf("%s %s %s%s", req.Raw().Method, blobPath, httpVersion, httpNewline)) + + for k, v := range req.Raw().Header { + if strings.EqualFold(k, shared.HeaderXmsVersion) { + continue + } + if len(v) > 0 { + batchSubRequest.WriteString(fmt.Sprintf("%v: %v%v", k, v[0], httpNewline)) + } + } + + batchSubRequest.WriteString(httpNewline) + return []byte(batchSubRequest.String()) +} + +// CreateBatchRequest creates a new batch request using the sub-requests present in the BlobBatchBuilder. +// +// Example of a sub-request in the batch request body: +// +// --batch_357de4f7-6d0b-4e02-8cd2-6361411a9525 +// Content-Type: application/http +// Content-Transfer-Encoding: binary +// Content-ID: 0 +// +// DELETE /container0/blob0 HTTP/1.1 +// x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT +// Authorization: SharedKey account: +// Content-Length: 0 +func CreateBatchRequest(bb *BlobBatchBuilder) ([]byte, string, error) { + batchID, err := createBatchID() + if err != nil { + return nil, "", err + } + + // Create a new multipart buffer + reqBody := &bytes.Buffer{} + writer := multipart.NewWriter(reqBody) + + // Set the boundary + err = writer.SetBoundary(batchID) + if err != nil { + return nil, "", err + } + + partHeaders := make(textproto.MIMEHeader) + partHeaders["Content-Type"] = []string{"application/http"} + partHeaders["Content-Transfer-Encoding"] = []string{"binary"} + var partWriter io.Writer + + for i, req := range bb.SubRequests { + if bb.AuthPolicy != nil { + _, err := bb.AuthPolicy.Do(req) + if err != nil && !strings.EqualFold(err.Error(), "no more policies") { + if log.Should(EventSubmitBatch) { + log.Writef(EventSubmitBatch, "failed to authorize sub-request for %v.\nError: %v", req.Raw().URL.Path, err.Error()) + } + return nil, "", err + } + } + + partHeaders["Content-ID"] = []string{fmt.Sprintf("%v", i)} + partWriter, err = writer.CreatePart(partHeaders) + if err != nil { + return nil, "", err + } + + _, err = partWriter.Write(buildSubRequest(req)) + if err != nil { + return nil, "", err + } + } + + // Close the multipart writer + err = writer.Close() + if err != nil { + return nil, "", err + } + + return reqBody.Bytes(), batchID, nil +} + +// UpdateSubRequestHeaders updates the sub-request headers. +// Removes x-ms-version header. +func UpdateSubRequestHeaders(req *policy.Request) { + // remove x-ms-version header from the request header + for k := range req.Raw().Header { + if strings.EqualFold(k, shared.HeaderXmsVersion) { + delete(req.Raw().Header, k) + } + } +} + +// BatchResponseItem contains the response for the individual sub-requests. 
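Side note on the batch plumbing above: `BlobBatchBuilder` only serializes sub-requests that the caller has already constructed as `*policy.Request` values, and `CreateBatchRequest` turns them into a multipart body whose boundary doubles as the batch ID. A minimal sketch of that flow is below; the blob URLs are made up, `AuthPolicy` is left nil so the sub-requests are serialized unauthenticated, and `internal/exported` is not importable outside the azblob module, so this is illustration only, not a supported API.

```go
package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
)

func main() {
	ctx := context.Background()

	// Hypothetical blob URLs; in the SDK these come from the batch client helpers.
	urls := []string{
		"https://myaccount.blob.core.windows.net/container0/blob0",
		"https://myaccount.blob.core.windows.net/container0/blob1",
	}

	var subRequests []*policy.Request
	for _, u := range urls {
		// Each sub-request is an ordinary azcore request that never goes on the wire itself.
		req, err := runtime.NewRequest(ctx, http.MethodDelete, u)
		if err != nil {
			panic(err)
		}
		subRequests = append(subRequests, req)
	}

	// AuthPolicy nil: sub-requests are serialized without an Authorization header.
	bb := &exported.BlobBatchBuilder{SubRequests: subRequests}

	body, batchID, err := exported.CreateBatchRequest(bb)
	if err != nil {
		panic(err)
	}
	// The batch ID is also the multipart boundary used in the request body.
	fmt.Println("boundary:", batchID)
	fmt.Println(string(body))
}
```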
+type BatchResponseItem struct { + ContentID *int + ContainerName *string + BlobName *string + RequestID *string + Version *string + Error error // nil error indicates that the batch sub-request operation is successful +} + +func getResponseBoundary(contentType *string) (string, error) { + if contentType == nil { + return "", fmt.Errorf("Content-Type returned in SubmitBatch response is nil") + } + + _, params, err := mime.ParseMediaType(*contentType) + if err != nil { + return "", err + } + + if val, ok := params["boundary"]; ok { + return val, nil + } else { + return "", fmt.Errorf("batch boundary not present in Content-Type header of the SubmitBatch response.\nContent-Type: %v", *contentType) + } +} + +func getContentID(part *multipart.Part) (*int, error) { + contentID := part.Header.Get("Content-ID") + if contentID == "" { + return nil, nil + } + + val, err := strconv.Atoi(strings.TrimSpace(contentID)) + if err != nil { + return nil, err + } + return &val, nil +} + +func getResponseHeader(key string, resp *http.Response) *string { + val := resp.Header.Get(key) + if val == "" { + return nil + } + return &val +} + +// ParseBlobBatchResponse is used for parsing the batch response body into individual sub-responses for each item in the batch. +func ParseBlobBatchResponse(respBody io.ReadCloser, contentType *string, subRequests []*policy.Request) ([]*BatchResponseItem, error) { + boundary, err := getResponseBoundary(contentType) + if err != nil { + return nil, err + } + + respReader := multipart.NewReader(respBody, boundary) + var responses []*BatchResponseItem + + for { + part, err := respReader.NextPart() + if errors.Is(err, io.EOF) { + break + } else if err != nil { + return nil, err + } + + batchSubResponse := &BatchResponseItem{} + batchSubResponse.ContentID, err = getContentID(part) + if err != nil { + return nil, err + } + + if batchSubResponse.ContentID != nil { + path := strings.Trim(subRequests[*batchSubResponse.ContentID].Raw().URL.Path, "/") + p := strings.Split(path, "/") + batchSubResponse.ContainerName = to.Ptr(p[0]) + batchSubResponse.BlobName = to.Ptr(strings.Join(p[1:], "/")) + } + + respBytes, err := io.ReadAll(part) + if err != nil { + return nil, err + } + respBytes = append(respBytes, byte('\n')) + buf := bytes.NewBuffer(respBytes) + resp, err := http.ReadResponse(bufio.NewReader(buf), nil) + // sub-response parsing error + if err != nil { + return nil, err + } + + batchSubResponse.RequestID = getResponseHeader(shared.HeaderXmsRequestID, resp) + batchSubResponse.Version = getResponseHeader(shared.HeaderXmsVersion, resp) + + // sub-response failure + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + if len(responses) == 0 && batchSubResponse.ContentID == nil { + // this case can happen when the parent request fails. + // For example, batch request having more than 256 sub-requests. 
+ return nil, fmt.Errorf("%v", string(respBytes)) + } + + resp.Request = subRequests[*batchSubResponse.ContentID].Raw() + batchSubResponse.Error = runtime.NewResponseError(resp) + } + + responses = append(responses, batchSubResponse) + } + + if len(responses) != len(subRequests) { + return nil, fmt.Errorf("expected %v responses, got %v for the batch ID: %v", len(subRequests), len(responses), boundary) + } + + return responses, nil +} + +// not exported but used for batch request creation + +// BlobBatchBuilder is used for creating the blob batch request +type BlobBatchBuilder struct { + AuthPolicy policy.Policy + SubRequests []*policy.Request +} + +// BlobBatchOperationType defines the operation of the blob batch sub-requests. +type BlobBatchOperationType string + +const ( + BatchDeleteOperationType BlobBatchOperationType = "delete" + BatchSetTierOperationType BlobBatchOperationType = "set tier" +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/exported.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/exported.go index 9bc1ca47df84d..d0355727c9086 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/exported.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/exported.go @@ -13,7 +13,7 @@ import ( // HTTPRange defines a range of bytes within an HTTP resource, starting at offset and // ending at offset+count. A zero-value HTTPRange indicates the entire resource. An HTTPRange -// which has an offset but no zero value count indicates from the offset to the resource's end. +// which has an offset and zero value count indicates from the offset to the resource's end. type HTTPRange struct { Offset int64 Count int64 diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/log_events.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/log_events.go new file mode 100644 index 0000000000000..d775fb5c88aa0 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/log_events.go @@ -0,0 +1,20 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package exported + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" +) + +// NOTE: these are publicly exported via type-aliasing in azblob/log.go +const ( + // EventUpload is used when we compute number of blocks to upload and size of each block. + EventUpload log.Event = "azblob.Upload" + + // EventSubmitBatch is used for logging events related to submit blob batch operation. + EventSubmitBatch log.Event = "azblob.SubmitBatch" +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/set_expiry.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/set_expiry.go new file mode 100644 index 0000000000000..71473decab45c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/set_expiry.go @@ -0,0 +1,71 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
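For context on the `HTTPRange` doc-comment fix a few hunks above: with the corrected wording, a zero-value range means the whole resource, and a non-zero `Offset` with `Count == 0` means "from the offset to the end". A tiny illustration of the three cases (field values only, no request is issued; the `internal/exported` import is shown purely for reference and is not importable outside the module):

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
)

func main() {
	ranges := []exported.HTTPRange{
		{},                         // zero value: the entire resource
		{Offset: 1024},             // Count == 0 with an offset: byte 1024 to the end
		{Offset: 1024, Count: 512}, // explicit window: bytes 1024 through 1535
	}
	for _, r := range ranges {
		fmt.Printf("offset=%d count=%d\n", r.Offset, r.Count)
	}
}
```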
+ +package exported + +import ( + "net/http" + "strconv" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" +) + +// ExpiryType defines values for ExpiryType +type ExpiryType interface { + Format(o *SetExpiryOptions) (generated.ExpiryOptions, *generated.BlobClientSetExpiryOptions) + notPubliclyImplementable() +} + +// ExpiryTypeAbsolute defines the absolute time for the blob expiry +type ExpiryTypeAbsolute time.Time + +// ExpiryTypeRelativeToNow defines the duration relative to now for the blob expiry +type ExpiryTypeRelativeToNow time.Duration + +// ExpiryTypeRelativeToCreation defines the duration relative to creation for the blob expiry +type ExpiryTypeRelativeToCreation time.Duration + +// ExpiryTypeNever defines that the blob will be set to never expire +type ExpiryTypeNever struct { + // empty struct since NeverExpire expiry type does not require expiry time +} + +// SetExpiryOptions contains the optional parameters for the Client.SetExpiry method. +type SetExpiryOptions struct { + // placeholder for future options +} + +func (e ExpiryTypeAbsolute) Format(o *SetExpiryOptions) (generated.ExpiryOptions, *generated.BlobClientSetExpiryOptions) { + return generated.ExpiryOptionsAbsolute, &generated.BlobClientSetExpiryOptions{ + ExpiresOn: to.Ptr(time.Time(e).UTC().Format(http.TimeFormat)), + } +} + +func (e ExpiryTypeAbsolute) notPubliclyImplementable() {} + +func (e ExpiryTypeRelativeToNow) Format(o *SetExpiryOptions) (generated.ExpiryOptions, *generated.BlobClientSetExpiryOptions) { + return generated.ExpiryOptionsRelativeToNow, &generated.BlobClientSetExpiryOptions{ + ExpiresOn: to.Ptr(strconv.FormatInt(time.Duration(e).Milliseconds(), 10)), + } +} + +func (e ExpiryTypeRelativeToNow) notPubliclyImplementable() {} + +func (e ExpiryTypeRelativeToCreation) Format(o *SetExpiryOptions) (generated.ExpiryOptions, *generated.BlobClientSetExpiryOptions) { + return generated.ExpiryOptionsRelativeToCreation, &generated.BlobClientSetExpiryOptions{ + ExpiresOn: to.Ptr(strconv.FormatInt(time.Duration(e).Milliseconds(), 10)), + } +} + +func (e ExpiryTypeRelativeToCreation) notPubliclyImplementable() {} + +func (e ExpiryTypeNever) Format(o *SetExpiryOptions) (generated.ExpiryOptions, *generated.BlobClientSetExpiryOptions) { + return generated.ExpiryOptionsNeverExpire, nil +} + +func (e ExpiryTypeNever) notPubliclyImplementable() {} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/shared_key_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/shared_key_credential.go index d1563105423fb..e4b076601f4e4 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/shared_key_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/shared_key_credential.go @@ -11,7 +11,9 @@ import ( "crypto/hmac" "crypto/sha256" "encoding/base64" + "errors" "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo" "net/http" "net/url" "sort" @@ -172,7 +174,7 @@ func (c *SharedKeyCredential) buildCanonicalizedResource(u *url.URL) (string, er // Join the sorted key values separated by ',' // Then prepend "keyName:"; then add this string to the buffer - cr.WriteString("\n" + paramName + ":" + strings.Join(paramValues, ",")) + cr.WriteString("\n" + strings.ToLower(paramName) + ":" + strings.Join(paramValues, ",")) } } return cr.String(), nil @@ -195,6 +197,17 @@ func 
NewSharedKeyCredPolicy(cred *SharedKeyCredential) *SharedKeyCredPolicy { } func (s *SharedKeyCredPolicy) Do(req *policy.Request) (*http.Response, error) { + // skip adding the authorization header if no SharedKeyCredential was provided. + // this prevents a panic that might be hard to diagnose and allows testing + // against http endpoints that don't require authentication. + if s.cred == nil { + return req.Next() + } + + if err := checkHTTPSForAuth(req); err != nil { + return nil, err + } + if d := getHeader(shared.HeaderXmsDate, req.Raw().Header); d == "" { req.Raw().Header.Set(shared.HeaderXmsDate, time.Now().UTC().Format(http.TimeFormat)) } @@ -216,3 +229,10 @@ func (s *SharedKeyCredPolicy) Do(req *policy.Request) (*http.Response, error) { } return response, err } + +func checkHTTPSForAuth(req *policy.Request) error { + if strings.ToLower(req.Raw().URL.Scheme) != "https" { + return errorinfo.NonRetriableError(errors.New("authenticated requests are not permitted for non TLS protected (https) endpoints")) + } + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/transfer_validation_option.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/transfer_validation_option.go new file mode 100644 index 0000000000000..f3e571fa6a82d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/transfer_validation_option.go @@ -0,0 +1,67 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package exported + +import ( + "bytes" + "encoding/binary" + "hash/crc64" + "io" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" +) + +// TransferValidationType abstracts the various mechanisms used to verify a transfer. +type TransferValidationType interface { + Apply(io.ReadSeekCloser, generated.TransactionalContentSetter) (io.ReadSeekCloser, error) + notPubliclyImplementable() +} + +// TransferValidationTypeCRC64 is a TransferValidationType used to provide a precomputed CRC64. +type TransferValidationTypeCRC64 uint64 + +func (c TransferValidationTypeCRC64) Apply(rsc io.ReadSeekCloser, cfg generated.TransactionalContentSetter) (io.ReadSeekCloser, error) { + buf := make([]byte, 8) + binary.LittleEndian.PutUint64(buf, uint64(c)) + cfg.SetCRC64(buf) + return rsc, nil +} + +func (TransferValidationTypeCRC64) notPubliclyImplementable() {} + +// TransferValidationTypeComputeCRC64 is a TransferValidationType that indicates a CRC64 should be computed during transfer. +func TransferValidationTypeComputeCRC64() TransferValidationType { + return transferValidationTypeFn(func(rsc io.ReadSeekCloser, cfg generated.TransactionalContentSetter) (io.ReadSeekCloser, error) { + buf, err := io.ReadAll(rsc) + if err != nil { + return nil, err + } + + crc := crc64.Checksum(buf, shared.CRC64Table) + return TransferValidationTypeCRC64(crc).Apply(streaming.NopCloser(bytes.NewReader(buf)), cfg) + }) +} + +// TransferValidationTypeMD5 is a TransferValidationType used to provide a precomputed MD5. 
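The compute-CRC64 path above buffers the whole payload, checksums it, and hands back a rewindable reader so the upload can still be replayed. A stand-alone sketch of that buffer/checksum/rewind pattern follows; note it uses the standard ECMA polynomial as a stand-in so it compiles on its own, whereas the SDK uses its `shared.CRC64Table`.

```go
package main

import (
	"bytes"
	"fmt"
	"hash/crc64"
	"io"
	"strings"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
)

func main() {
	// Any io.ReadSeekCloser works; streaming.NopCloser wraps a plain ReadSeeker.
	payload := streaming.NopCloser(strings.NewReader("hello, blob"))

	// Buffer the payload so it can be checksummed and then replayed.
	buf, err := io.ReadAll(payload)
	if err != nil {
		panic(err)
	}

	// NOTE: ECMA is only a stand-in here; the SDK checksums with shared.CRC64Table.
	table := crc64.MakeTable(crc64.ECMA)
	crc := crc64.Checksum(buf, table)
	fmt.Printf("crc64: %d\n", crc)

	// Rewind: hand the buffered bytes back as a fresh ReadSeekCloser,
	// mirroring what TransferValidationTypeComputeCRC64 does.
	replay := streaming.NopCloser(bytes.NewReader(buf))
	_ = replay
}
```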
+type TransferValidationTypeMD5 []byte + +func (c TransferValidationTypeMD5) Apply(rsc io.ReadSeekCloser, cfg generated.TransactionalContentSetter) (io.ReadSeekCloser, error) { + cfg.SetMD5(c) + return rsc, nil +} + +func (TransferValidationTypeMD5) notPubliclyImplementable() {} + +type transferValidationTypeFn func(io.ReadSeekCloser, generated.TransactionalContentSetter) (io.ReadSeekCloser, error) + +func (t transferValidationTypeFn) Apply(rsc io.ReadSeekCloser, cfg generated.TransactionalContentSetter) (io.ReadSeekCloser, error) { + return t(rsc, cfg) +} + +func (transferValidationTypeFn) notPubliclyImplementable() {} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go index 7b8ee601dc83c..6ee5452f807a6 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go @@ -7,6 +7,6 @@ package exported const ( - ModuleName = "azblob" - ModuleVersion = "v0.5.1" + ModuleName = "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" + ModuleVersion = "v1.3.0" ) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/appendblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/appendblob_client.go index 3b6184fea67c4..288df7edda477 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/appendblob_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/appendblob_client.go @@ -8,12 +8,25 @@ package generated -import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" +) func (client *AppendBlobClient) Endpoint() string { return client.endpoint } -func (client *AppendBlobClient) Pipeline() runtime.Pipeline { - return client.pl +func (client *AppendBlobClient) InternalClient() *azcore.Client { + return client.internal +} + +// NewAppendBlobClient creates a new instance of AppendBlobClient with the specified values. +// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. +// - azClient - azcore.Client is a basic HTTP client. It consists of a pipeline and tracing provider. 
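The regenerated clients are now built around `*azcore.Client` rather than a raw `runtime.Pipeline`, so hand-written constructors such as `NewAppendBlobClient` below simply capture that client plus the endpoint. A hedged sketch of how such a client might be created and wired in; the `azcore.NewClient` signature is assumed from current azcore releases, the endpoint is hypothetical, and `internal/generated` is not a public package.

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
)

func main() {
	// Module name/version mirror the exported constants in the version.go hunk above.
	// Signature of azcore.NewClient is an assumption based on current azcore releases.
	azClient, err := azcore.NewClient(
		"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob", "v1.3.0",
		runtime.PipelineOptions{}, // per-module policies would be added here
		&azcore.ClientOptions{},   // transport, retry, logging, etc.
	)
	if err != nil {
		panic(err)
	}

	// Hypothetical blob endpoint.
	blobURL := "https://myaccount.blob.core.windows.net/container0/blob0"
	appendClient := generated.NewAppendBlobClient(blobURL, azClient)
	fmt.Println(appendClient.Endpoint())
}
```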
+func NewAppendBlobClient(endpoint string, azClient *azcore.Client) *AppendBlobClient { + client := &AppendBlobClient{ + internal: azClient, + endpoint: endpoint, + } + return client } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md index d4e2100e1972b..92dc7e2d31e5d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md @@ -7,9 +7,9 @@ go: true clear-output-folder: false version: "^3.0.0" license-header: MICROSOFT_MIT_NO_VERSION -input-file: "https://raw.githubusercontent.com/Azure/azure-rest-api-specs/e515b6251fdc21015282d2e84b85beec7c091763/specification/storage/data-plane/Microsoft.BlobStorage/preview/2020-10-02/blob.json" +input-file: "https://raw.githubusercontent.com/Azure/azure-rest-api-specs/a32d0b2423d19835246bb2ef92941503bfd5e734/specification/storage/data-plane/Microsoft.BlobStorage/preview/2021-12-02/blob.json" credential-scope: "https://storage.azure.com/.default" -output-folder: . +output-folder: ../generated file-prefix: "zz_" openapi-type: "data-plane" verbose: true @@ -19,7 +19,43 @@ modelerfour: seal-single-value-enum-by-default: true lenient-model-deduplication: true export-clients: true -use: "@autorest/go@4.0.0-preview.43" +use: "@autorest/go@4.0.0-preview.61" +``` + +### Updating service version to 2023-11-03 +```yaml +directive: +- from: + - zz_appendblob_client.go + - zz_blob_client.go + - zz_blockblob_client.go + - zz_container_client.go + - zz_pageblob_client.go + - zz_service_client.go + where: $ + transform: >- + return $. + replaceAll(`[]string{"2021-12-02"}`, `[]string{ServiceVersion}`). + replaceAll(`2021-12-02`, `2023-11-03`); +``` + +### Undo breaking change with BlobName +``` yaml +directive: +- from: zz_models.go + where: $ + transform: >- + return $. + replace(/Name\s+\*BlobName/g, `Name *string`); +``` + +### Removing UnmarshalXML for BlobItems to create customer UnmarshalXML function +```yaml +directive: +- from: swagger-document + where: $.definitions + transform: > + $.BlobItemInternal["x-ms-go-omit-serde-methods"] = true; ``` ### Remove pager methods and export various generated methods in container client @@ -30,7 +66,7 @@ directive: where: $ transform: >- return $. - replace(/func \(client \*ContainerClient\) NewListBlobFlatSegmentPager\(.+\/\/ listBlobFlatSegmentCreateRequest creates the ListBlobFlatSegment request/s, `// listBlobFlatSegmentCreateRequest creates the ListBlobFlatSegment request`). + replace(/func \(client \*ContainerClient\) NewListBlobFlatSegmentPager\(.+\/\/ listBlobFlatSegmentCreateRequest creates the ListBlobFlatSegment request/s, `//\n// listBlobFlatSegmentCreateRequest creates the ListBlobFlatSegment request`). replace(/\(client \*ContainerClient\) listBlobFlatSegmentCreateRequest\(/, `(client *ContainerClient) ListBlobFlatSegmentCreateRequest(`). replace(/\(client \*ContainerClient\) listBlobFlatSegmentHandleResponse\(/, `(client *ContainerClient) ListBlobFlatSegmentHandleResponse(`); ``` @@ -43,7 +79,7 @@ directive: where: $ transform: >- return $. - replace(/func \(client \*ServiceClient\) NewListContainersSegmentPager\(.+\/\/ listContainersSegmentCreateRequest creates the ListContainersSegment request/s, `// listContainersSegmentCreateRequest creates the ListContainersSegment request`). 
+ replace(/func \(client \*ServiceClient\) NewListContainersSegmentPager\(.+\/\/ listContainersSegmentCreateRequest creates the ListContainersSegment request/s, `//\n// listContainersSegmentCreateRequest creates the ListContainersSegment request`). replace(/\(client \*ServiceClient\) listContainersSegmentCreateRequest\(/, `(client *ServiceClient) ListContainersSegmentCreateRequest(`). replace(/\(client \*ServiceClient\) listContainersSegmentHandleResponse\(/, `(client *ServiceClient) ListContainersSegmentHandleResponse(`); ``` @@ -244,7 +280,9 @@ directive: ``` yaml directive: -- from: zz_models.go +- from: + - zz_models.go + - zz_options.go where: $ transform: >- return $. @@ -299,6 +337,139 @@ directive: where: $ transform: >- return $. - replace(/StorageErrorCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed\t+\StorageErrorCode\s+=\s+\"IncrementalCopyOfEralierVersionSnapshotNotAllowed"\n, /StorageErrorCodeIncrementalCopyOfEarlierVersionSnapshotNotAllowed\t+\StorageErrorCode\s+=\s+\"IncrementalCopyOfEarlierVersionSnapshotNotAllowed"\ - replace(/StorageErrorCodeIncrementalCopyOfEarlierVersionSnapshotNotAllowed/g, /StorageErrorCodeIncrementalCopyOfEarlierVersionSnapshotNotAllowed/g) + replace(/IncrementalCopyOfEralierVersionSnapshotNotAllowed/g, "IncrementalCopyOfEarlierVersionSnapshotNotAllowed"); +``` + +### Fix up x-ms-content-crc64 header response name + +``` yaml +directive: +- from: swagger-document + where: $.x-ms-paths.*.*.responses.*.headers.x-ms-content-crc64 + transform: > + $["x-ms-client-name"] = "ContentCRC64" +``` + +``` yaml +directive: +- rename-model: + from: BlobItemInternal + to: BlobItem +- rename-model: + from: BlobPropertiesInternal + to: BlobProperties +``` + +### Updating encoding URL, Golang adds '+' which disrupts encoding with service + +``` yaml +directive: + - from: zz_service_client.go + where: $ + transform: >- + return $. + replace(/req.Raw\(\).URL.RawQuery \= reqQP.Encode\(\)/, `req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1)`) +``` + +### Change `where` parameter in blob filtering to be required + +``` yaml +directive: +- from: swagger-document + where: $.parameters.FilterBlobsWhere + transform: > + $.required = true; +``` + +### Change `Duration` parameter in leases to be required + +``` yaml +directive: +- from: swagger-document + where: $.parameters.LeaseDuration + transform: > + $.required = true; +``` + +### Change CPK acronym to be all caps + +``` yaml +directive: + - from: source-file-go + where: $ + transform: >- + return $. + replace(/Cpk/g, "CPK"); +``` + +### Change CORS acronym to be all caps + +``` yaml +directive: + - from: source-file-go + where: $ + transform: >- + return $. + replace(/Cors/g, "CORS"); +``` + +### Change cors xml to be correct + +``` yaml +directive: + - from: source-file-go + where: $ + transform: >- + return $. + replace(/xml:"CORS>CORSRule"/g, "xml:\"Cors>CorsRule\""); +``` + +### Fix Content-Type header in submit batch request + +``` yaml +directive: +- from: + - zz_container_client.go + - zz_service_client.go + where: $ + transform: >- + return $. + replace (/req.SetBody\(body\,\s+\"application\/xml\"\)/g, `req.SetBody(body, multipartContentType)`); ``` + +### Fix response status code check in submit batch request + +``` yaml +directive: +- from: zz_service_client.go + where: $ + transform: >- + return $. 
+ replace(/if\s+!runtime\.HasStatusCode\(httpResp,\s+http\.StatusOK\)\s+\{\s+err\s+=\s+runtime\.NewResponseError\(httpResp\)\s+return ServiceClientSubmitBatchResponse\{\}\,\s+err\s+}/g, + `if !runtime.HasStatusCode(httpResp, http.StatusAccepted) {\n\t\terr = runtime.NewResponseError(httpResp)\n\t\treturn ServiceClientSubmitBatchResponse{}, err\n\t}`); +``` + +### Convert time to GMT for If-Modified-Since and If-Unmodified-Since request headers + +``` yaml +directive: +- from: + - zz_container_client.go + - zz_blob_client.go + - zz_appendblob_client.go + - zz_blockblob_client.go + - zz_pageblob_client.go + where: $ + transform: >- + return $. + replace (/req\.Raw\(\)\.Header\[\"If-Modified-Since\"\]\s+=\s+\[\]string\{modifiedAccessConditions\.IfModifiedSince\.Format\(time\.RFC1123\)\}/g, + `req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}`). + replace (/req\.Raw\(\)\.Header\[\"If-Unmodified-Since\"\]\s+=\s+\[\]string\{modifiedAccessConditions\.IfUnmodifiedSince\.Format\(time\.RFC1123\)\}/g, + `req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}`). + replace (/req\.Raw\(\)\.Header\[\"x-ms-source-if-modified-since\"\]\s+=\s+\[\]string\{sourceModifiedAccessConditions\.SourceIfModifiedSince\.Format\(time\.RFC1123\)\}/g, + `req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)}`). + replace (/req\.Raw\(\)\.Header\[\"x-ms-source-if-unmodified-since\"\]\s+=\s+\[\]string\{sourceModifiedAccessConditions\.SourceIfUnmodifiedSince\.Format\(time\.RFC1123\)\}/g, + `req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)}`). 
+ replace (/req\.Raw\(\)\.Header\[\"x-ms-immutability-policy-until-date\"\]\s+=\s+\[\]string\{options\.ImmutabilityPolicyExpiry\.Format\(time\.RFC1123\)\}/g, + `req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)}`); + diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/blob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/blob_client.go index c3d3c2607c8cc..343073b2e660a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/blob_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/blob_client.go @@ -6,12 +6,39 @@ package generated -import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" +import ( + "context" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "time" +) + +// used to convert times from UTC to GMT before sending across the wire +var gmt = time.FixedZone("GMT", 0) func (client *BlobClient) Endpoint() string { return client.endpoint } -func (client *BlobClient) Pipeline() runtime.Pipeline { - return client.pl +func (client *BlobClient) InternalClient() *azcore.Client { + return client.internal +} + +func (client *BlobClient) DeleteCreateRequest(ctx context.Context, options *BlobClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + return client.deleteCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) +} + +func (client *BlobClient) SetTierCreateRequest(ctx context.Context, tier AccessTier, options *BlobClientSetTierOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + return client.setTierCreateRequest(ctx, tier, options, leaseAccessConditions, modifiedAccessConditions) +} + +// NewBlobClient creates a new instance of BlobClient with the specified values. +// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. +// - azClient - azcore.Client is a basic HTTP client. It consists of a pipeline and tracing provider. +func NewBlobClient(endpoint string, azClient *azcore.Client) *BlobClient { + client := &BlobClient{ + internal: azClient, + endpoint: endpoint, + } + return client } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/block_blob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/block_blob_client.go index a43e327ec44d4..873d9a419fb4c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/block_blob_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/block_blob_client.go @@ -8,12 +8,25 @@ package generated -import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" +) func (client *BlockBlobClient) Endpoint() string { return client.endpoint } -func (client *BlockBlobClient) Pipeline() runtime.Pipeline { - return client.pl +func (client *BlockBlobClient) Internal() *azcore.Client { + return client.internal +} + +// NewBlockBlobClient creates a new instance of BlockBlobClient with the specified values. 
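The `gmt` fixed zone introduced in blob_client.go above, together with the autorest directives that rewrite the If-Modified-Since family of headers, exist because `time.RFC1123` renders the zone abbreviation of the time's location: a UTC timestamp formats with a trailing "UTC", while HTTP date headers must say "GMT". Converting with `In(gmt)` before formatting produces the required form. A small self-contained illustration:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	gmt := time.FixedZone("GMT", 0)
	t := time.Date(2023, time.November, 3, 17, 40, 0, 0, time.UTC)

	// Prints "Fri, 03 Nov 2023 17:40:00 UTC" - not a valid HTTP date.
	fmt.Println(t.Format(time.RFC1123))

	// Prints "Fri, 03 Nov 2023 17:40:00 GMT", matching http.TimeFormat.
	fmt.Println(t.In(gmt).Format(time.RFC1123))
}
```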
+// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. +// - azClient - azcore.Client is a basic HTTP client. It consists of a pipeline and tracing provider. +func NewBlockBlobClient(endpoint string, azClient *azcore.Client) *BlockBlobClient { + client := &BlockBlobClient{ + internal: azClient, + endpoint: endpoint, + } + return client } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/build.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/build.go new file mode 100644 index 0000000000000..57f112001bd2c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/build.go @@ -0,0 +1,10 @@ +//go:build go1.18 +// +build go1.18 + +//go:generate autorest ./autorest.md +//go:generate gofmt -w . + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package generated diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/constants.go new file mode 100644 index 0000000000000..8f2bbbb7cb816 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/constants.go @@ -0,0 +1,9 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package generated + +const ServiceVersion = "2023-11-03" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/container_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/container_client.go index bbbf828a07af9..d43b2c7825904 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/container_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/container_client.go @@ -6,12 +6,25 @@ package generated -import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" +) func (client *ContainerClient) Endpoint() string { return client.endpoint } -func (client *ContainerClient) Pipeline() runtime.Pipeline { - return client.pl +func (client *ContainerClient) InternalClient() *azcore.Client { + return client.internal +} + +// NewContainerClient creates a new instance of ContainerClient with the specified values. +// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. +// - pl - the pipeline used for sending requests and handling responses. +func NewContainerClient(endpoint string, azClient *azcore.Client) *ContainerClient { + client := &ContainerClient{ + internal: azClient, + endpoint: endpoint, + } + return client } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/models.go new file mode 100644 index 0000000000000..aaef9f53ba67d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/models.go @@ -0,0 +1,141 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See License.txt in the project root for license information. + +package generated + +import ( + "encoding/xml" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "net/url" +) + +type TransactionalContentSetter interface { + SetCRC64([]byte) + SetMD5([]byte) +} + +func (a *AppendBlobClientAppendBlockOptions) SetCRC64(v []byte) { + a.TransactionalContentCRC64 = v +} + +func (a *AppendBlobClientAppendBlockOptions) SetMD5(v []byte) { + a.TransactionalContentMD5 = v +} + +func (b *BlockBlobClientStageBlockOptions) SetCRC64(v []byte) { + b.TransactionalContentCRC64 = v +} + +func (b *BlockBlobClientStageBlockOptions) SetMD5(v []byte) { + b.TransactionalContentMD5 = v +} + +func (p *PageBlobClientUploadPagesOptions) SetCRC64(v []byte) { + p.TransactionalContentCRC64 = v +} + +func (p *PageBlobClientUploadPagesOptions) SetMD5(v []byte) { + p.TransactionalContentMD5 = v +} + +func (b *BlockBlobClientUploadOptions) SetCRC64(v []byte) { + b.TransactionalContentCRC64 = v +} + +func (b *BlockBlobClientUploadOptions) SetMD5(v []byte) { + b.TransactionalContentMD5 = v +} + +type SourceContentSetter interface { + SetSourceContentCRC64(v []byte) + SetSourceContentMD5(v []byte) +} + +func (a *AppendBlobClientAppendBlockFromURLOptions) SetSourceContentCRC64(v []byte) { + a.SourceContentcrc64 = v +} + +func (a *AppendBlobClientAppendBlockFromURLOptions) SetSourceContentMD5(v []byte) { + a.SourceContentMD5 = v +} + +func (b *BlockBlobClientStageBlockFromURLOptions) SetSourceContentCRC64(v []byte) { + b.SourceContentcrc64 = v +} + +func (b *BlockBlobClientStageBlockFromURLOptions) SetSourceContentMD5(v []byte) { + b.SourceContentMD5 = v +} + +func (p *PageBlobClientUploadPagesFromURLOptions) SetSourceContentCRC64(v []byte) { + p.SourceContentcrc64 = v +} + +func (p *PageBlobClientUploadPagesFromURLOptions) SetSourceContentMD5(v []byte) { + p.SourceContentMD5 = v +} + +// Custom UnmarshalXML functions for types that need special handling. + +// UnmarshalXML implements the xml.Unmarshaller interface for type BlobPrefix. +func (b *BlobPrefix) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { + type alias BlobPrefix + aux := &struct { + *alias + BlobName *BlobName `xml:"Name"` + }{ + alias: (*alias)(b), + } + if err := dec.DecodeElement(aux, &start); err != nil { + return err + } + if aux.BlobName != nil { + if aux.BlobName.Encoded != nil && *aux.BlobName.Encoded { + name, err := url.QueryUnescape(*aux.BlobName.Content) + + // name, err := base64.StdEncoding.DecodeString(*aux.BlobName.Content) + if err != nil { + return err + } + b.Name = to.Ptr(string(name)) + } else { + b.Name = aux.BlobName.Content + } + } + return nil +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type BlobItem. 
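When a listing response carries `<Name Encoded="true">`, both the `BlobPrefix` unmarshaller above and the `BlobItem` one that follows recover the original blob name with `url.QueryUnescape`. A tiny illustration of that decoding step in isolation; the percent-encoded value is made up:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Hypothetical percent-encoded blob name as it might appear with Encoded="true".
	encoded := "logs%2F2023-11-03%2Fapp%20server.log"

	name, err := url.QueryUnescape(encoded)
	if err != nil {
		panic(err)
	}
	fmt.Println(name) // logs/2023-11-03/app server.log
}
```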
+func (b *BlobItem) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { + type alias BlobItem + aux := &struct { + *alias + BlobName *BlobName `xml:"Name"` + Metadata additionalProperties `xml:"Metadata"` + OrMetadata additionalProperties `xml:"OrMetadata"` + }{ + alias: (*alias)(b), + } + if err := dec.DecodeElement(aux, &start); err != nil { + return err + } + b.Metadata = (map[string]*string)(aux.Metadata) + b.OrMetadata = (map[string]*string)(aux.OrMetadata) + if aux.BlobName != nil { + if aux.BlobName.Encoded != nil && *aux.BlobName.Encoded { + name, err := url.QueryUnescape(*aux.BlobName.Content) + + // name, err := base64.StdEncoding.DecodeString(*aux.BlobName.Content) + if err != nil { + return err + } + b.Name = to.Ptr(string(name)) + } else { + b.Name = aux.BlobName.Content + } + } + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/pageblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/pageblob_client.go index 8a212cc3d4e22..a7c76208aa258 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/pageblob_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/pageblob_client.go @@ -6,12 +6,25 @@ package generated -import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" +) func (client *PageBlobClient) Endpoint() string { return client.endpoint } -func (client *PageBlobClient) Pipeline() runtime.Pipeline { - return client.pl +func (client *PageBlobClient) InternalClient() *azcore.Client { + return client.internal +} + +// NewPageBlobClient creates a new instance of PageBlobClient with the specified values. +// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. +// - azClient - azcore.Client is a basic HTTP client. It consists of a pipeline and tracing provider. +func NewPageBlobClient(endpoint string, azClient *azcore.Client) *PageBlobClient { + client := &PageBlobClient{ + internal: azClient, + endpoint: endpoint, + } + return client } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/service_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/service_client.go index 1f449b955e821..32c15a2b097ce 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/service_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/service_client.go @@ -6,12 +6,25 @@ package generated -import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" +) func (client *ServiceClient) Endpoint() string { return client.endpoint } -func (client *ServiceClient) Pipeline() runtime.Pipeline { - return client.pl +func (client *ServiceClient) InternalClient() *azcore.Client { + return client.internal +} + +// NewServiceClient creates a new instance of ServiceClient with the specified values. +// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. +// - azClient - azcore.Client is a basic HTTP client. It consists of a pipeline and tracing provider. 
+func NewServiceClient(endpoint string, azClient *azcore.Client) *ServiceClient { + client := &ServiceClient{ + internal: azClient, + endpoint: endpoint, + } + return client } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go index d0fe18c5d2724..797318611c37b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go @@ -3,9 +3,8 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. // Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. package generated @@ -22,54 +21,47 @@ import ( ) // AppendBlobClient contains the methods for the AppendBlob group. -// Don't use this type directly, use NewAppendBlobClient() instead. +// Don't use this type directly, use a constructor function instead. type AppendBlobClient struct { + internal *azcore.Client endpoint string - pl runtime.Pipeline -} - -// NewAppendBlobClient creates a new instance of AppendBlobClient with the specified values. -// endpoint - The URL of the service account, container, or blob that is the target of the desired operation. -// pl - the pipeline used for sending requests and handling responses. -func NewAppendBlobClient(endpoint string, pl runtime.Pipeline) *AppendBlobClient { - client := &AppendBlobClient{ - endpoint: endpoint, - pl: pl, - } - return client } // AppendBlock - The Append Block operation commits a new block of data to the end of an existing append blob. The Append // Block operation is permitted only if the blob was created with x-ms-blob-type set to // AppendBlob. Append Block is supported only on version 2015-02-21 version or later. // If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// contentLength - The length of the request. -// body - Initial data -// options - AppendBlobClientAppendBlockOptions contains the optional parameters for the AppendBlobClient.AppendBlock method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// AppendPositionAccessConditions - AppendPositionAccessConditions contains a group of parameters for the AppendBlobClient.AppendBlock -// method. -// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. -// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
-func (client *AppendBlobClient) AppendBlock(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *AppendBlobClientAppendBlockOptions, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (AppendBlobClientAppendBlockResponse, error) { +// +// Generated from API version 2023-11-03 +// - contentLength - The length of the request. +// - body - Initial data +// - options - AppendBlobClientAppendBlockOptions contains the optional parameters for the AppendBlobClient.AppendBlock method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - AppendPositionAccessConditions - AppendPositionAccessConditions contains a group of parameters for the AppendBlobClient.AppendBlock +// method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *AppendBlobClient) AppendBlock(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *AppendBlobClientAppendBlockOptions, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (AppendBlobClientAppendBlockResponse, error) { + var err error req, err := client.appendBlockCreateRequest(ctx, contentLength, body, options, leaseAccessConditions, appendPositionAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) if err != nil { return AppendBlobClientAppendBlockResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return AppendBlobClientAppendBlockResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return AppendBlobClientAppendBlockResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return AppendBlobClientAppendBlockResponse{}, err } - return client.appendBlockHandleResponse(resp) + resp, err := client.appendBlockHandleResponse(httpResp) + return resp, err } // appendBlockCreateRequest creates the AppendBlock request. 
-func (client *AppendBlobClient) appendBlockCreateRequest(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *AppendBlobClientAppendBlockOptions, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *AppendBlobClient) appendBlockCreateRequest(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *AppendBlobClientAppendBlockOptions, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -109,10 +101,10 @@ func (client *AppendBlobClient) appendBlockCreateRequest(ctx context.Context, co req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -123,49 +115,47 @@ func (client *AppendBlobClient) appendBlockCreateRequest(ctx context.Context, co if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } req.Raw().Header["Accept"] = []string{"application/xml"} - return req, req.SetBody(body, "application/octet-stream") + if err := req.SetBody(body, "application/octet-stream"); err != nil { + return nil, err + } + return req, nil } // appendBlockHandleResponse handles the AppendBlock response. 
func (client *AppendBlobClient) appendBlockHandleResponse(resp *http.Response) (AppendBlobClientAppendBlockResponse, error) { result := AppendBlobClientAppendBlockResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) + if val := resp.Header.Get("x-ms-blob-append-offset"); val != "" { + result.BlobAppendOffset = &val } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { + blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) + blobCommittedBlockCount := int32(blobCommittedBlockCount32) if err != nil { return AppendBlobClientAppendBlockResponse{}, err } - result.LastModified = &lastModified + result.BlobCommittedBlockCount = &blobCommittedBlockCount } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) if err != nil { return AppendBlobClientAppendBlockResponse{}, err } - result.ContentMD5 = contentMD5 + result.ContentCRC64 = contentCRC64 } - if val := resp.Header.Get("x-ms-content-crc64"); val != "" { - xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) if err != nil { return AppendBlobClientAppendBlockResponse{}, err } - result.XMSContentCRC64 = xMSContentCRC64 - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val + result.ContentMD5 = contentMD5 } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) @@ -174,16 +164,14 @@ func (client *AppendBlobClient) appendBlockHandleResponse(resp *http.Response) ( } result.Date = &date } - if val := resp.Header.Get("x-ms-blob-append-offset"); val != "" { - result.BlobAppendOffset = &val + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) } - if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { - blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) - blobCommittedBlockCount := int32(blobCommittedBlockCount32) - if err != nil { - return AppendBlobClientAppendBlockResponse{}, err - } - result.BlobCommittedBlockCount = &blobCommittedBlockCount + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val } if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { isServerEncrypted, err := strconv.ParseBool(val) @@ -192,11 +180,18 @@ func (client *AppendBlobClient) appendBlockHandleResponse(resp *http.Response) ( } result.IsServerEncrypted = &isServerEncrypted } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientAppendBlockResponse{}, err + } + result.LastModified = &lastModified } - 
if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val } return result, nil } @@ -205,36 +200,40 @@ func (client *AppendBlobClient) appendBlockHandleResponse(resp *http.Response) ( // the contents are read from a source url. The Append Block operation is permitted only if the blob was // created with x-ms-blob-type set to AppendBlob. Append Block is supported only on version 2015-02-21 version or later. // If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// sourceURL - Specify a URL to the copy source. -// contentLength - The length of the request. -// options - AppendBlobClientAppendBlockFromURLOptions contains the optional parameters for the AppendBlobClient.AppendBlockFromURL -// method. -// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. -// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// AppendPositionAccessConditions - AppendPositionAccessConditions contains a group of parameters for the AppendBlobClient.AppendBlock -// method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL -// method. -func (client *AppendBlobClient) AppendBlockFromURL(ctx context.Context, sourceURL string, contentLength int64, options *AppendBlobClientAppendBlockFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (AppendBlobClientAppendBlockFromURLResponse, error) { +// +// Generated from API version 2023-11-03 +// - sourceURL - Specify a URL to the copy source. +// - contentLength - The length of the request. +// - options - AppendBlobClientAppendBlockFromURLOptions contains the optional parameters for the AppendBlobClient.AppendBlockFromURL +// method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - AppendPositionAccessConditions - AppendPositionAccessConditions contains a group of parameters for the AppendBlobClient.AppendBlock +// method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// - SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL +// method. 
+func (client *AppendBlobClient) AppendBlockFromURL(ctx context.Context, sourceURL string, contentLength int64, options *AppendBlobClientAppendBlockFromURLOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (AppendBlobClientAppendBlockFromURLResponse, error) { + var err error req, err := client.appendBlockFromURLCreateRequest(ctx, sourceURL, contentLength, options, cpkInfo, cpkScopeInfo, leaseAccessConditions, appendPositionAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions) if err != nil { return AppendBlobClientAppendBlockFromURLResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return AppendBlobClientAppendBlockFromURLResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return AppendBlobClientAppendBlockFromURLResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return AppendBlobClientAppendBlockFromURLResponse{}, err } - return client.appendBlockFromURLHandleResponse(resp) + resp, err := client.appendBlockFromURLHandleResponse(httpResp) + return resp, err } // appendBlockFromURLCreateRequest creates the AppendBlockFromURL request. -func (client *AppendBlobClient) appendBlockFromURLCreateRequest(ctx context.Context, sourceURL string, contentLength int64, options *AppendBlobClientAppendBlockFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) { +func (client *AppendBlobClient) appendBlockFromURLCreateRequest(ctx context.Context, sourceURL string, contentLength int64, options *AppendBlobClientAppendBlockFromURLOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -281,10 +280,10 @@ func (client *AppendBlobClient) appendBlockFromURLCreateRequest(ctx context.Cont req.Raw().Header["x-ms-blob-condition-appendpos"] = []string{strconv.FormatInt(*appendPositionAccessConditions.AppendPosition, 10)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { 
req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -296,10 +295,10 @@ func (client *AppendBlobClient) appendBlockFromURLCreateRequest(ctx context.Cont req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { - req.Raw().Header["x-ms-source-if-modified-since"] = []string{sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { - req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} @@ -307,7 +306,7 @@ func (client *AppendBlobClient) appendBlockFromURLCreateRequest(ctx context.Cont if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -321,35 +320,30 @@ func (client *AppendBlobClient) appendBlockFromURLCreateRequest(ctx context.Cont // appendBlockFromURLHandleResponse handles the AppendBlockFromURL response. 
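A quick aside on the In(gmt) change that recurs in the hunks above and below: HTTP conditional-request dates must be rendered in GMT, and time.Format(time.RFC1123) on its own keeps whatever zone the time.Time carries. The sketch below is illustrative only; the gmt variable stands in for the package-level GMT zone the regenerated code relies on (not shown in these hunks), and the sample date and zone are made up.

package main

import (
	"fmt"
	"time"
)

func main() {
	// Stand-in for the generated package's gmt zone (assumed to be a fixed UTC+0 zone named "GMT").
	gmt := time.FixedZone("GMT", 0)
	t := time.Date(2023, time.November, 3, 17, 40, 0, 0, time.FixedZone("CET", 60*60))

	fmt.Println(t.Format(time.RFC1123))         // Fri, 03 Nov 2023 17:40:00 CET  (zone is not GMT, so not a valid HTTP date)
	fmt.Println(t.In(gmt).Format(time.RFC1123)) // Fri, 03 Nov 2023 16:40:00 GMT
}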
func (client *AppendBlobClient) appendBlockFromURLHandleResponse(resp *http.Response) (AppendBlobClientAppendBlockFromURLResponse, error) { result := AppendBlobClientAppendBlockFromURLResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) + if val := resp.Header.Get("x-ms-blob-append-offset"); val != "" { + result.BlobAppendOffset = &val } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { + blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) + blobCommittedBlockCount := int32(blobCommittedBlockCount32) if err != nil { return AppendBlobClientAppendBlockFromURLResponse{}, err } - result.LastModified = &lastModified + result.BlobCommittedBlockCount = &blobCommittedBlockCount } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) if err != nil { return AppendBlobClientAppendBlockFromURLResponse{}, err } - result.ContentMD5 = contentMD5 + result.ContentCRC64 = contentCRC64 } - if val := resp.Header.Get("x-ms-content-crc64"); val != "" { - xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) if err != nil { return AppendBlobClientAppendBlockFromURLResponse{}, err } - result.XMSContentCRC64 = xMSContentCRC64 - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val + result.ContentMD5 = contentMD5 } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) @@ -358,16 +352,8 @@ func (client *AppendBlobClient) appendBlockFromURLHandleResponse(resp *http.Resp } result.Date = &date } - if val := resp.Header.Get("x-ms-blob-append-offset"); val != "" { - result.BlobAppendOffset = &val - } - if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { - blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) - blobCommittedBlockCount := int32(blobCommittedBlockCount32) - if err != nil { - return AppendBlobClientAppendBlockFromURLResponse{}, err - } - result.BlobCommittedBlockCount = &blobCommittedBlockCount + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { result.EncryptionKeySHA256 = &val @@ -382,36 +368,53 @@ func (client *AppendBlobClient) appendBlockFromURLHandleResponse(resp *http.Resp } result.IsServerEncrypted = &isServerEncrypted } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientAppendBlockFromURLResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } // Create - The Create Append Blob operation creates a new append blob. // If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// contentLength - The length of the request. 
-// options - AppendBlobClientCreateOptions contains the optional parameters for the AppendBlobClient.Create method. -// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. -// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -func (client *AppendBlobClient) Create(ctx context.Context, contentLength int64, options *AppendBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (AppendBlobClientCreateResponse, error) { +// +// Generated from API version 2023-11-03 +// - contentLength - The length of the request. +// - options - AppendBlobClientCreateOptions contains the optional parameters for the AppendBlobClient.Create method. +// - BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *AppendBlobClient) Create(ctx context.Context, contentLength int64, options *AppendBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (AppendBlobClientCreateResponse, error) { + var err error req, err := client.createCreateRequest(ctx, contentLength, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) if err != nil { return AppendBlobClientCreateResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return AppendBlobClientCreateResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return AppendBlobClientCreateResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return AppendBlobClientCreateResponse{}, err } - return client.createHandleResponse(resp) + resp, err := client.createHandleResponse(httpResp) + return resp, err } // createCreateRequest creates the Create request. 
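As a usage sketch of the Create signature above, written as if it sat inside the generated package (the package is internal, so it cannot be imported from outside the azblob module); the helper name is hypothetical. An append blob is created with a zero content length and grown later with AppendBlock or AppendBlockFromURL.

package generated

import "context"

// createEmptyAppendBlob is a hypothetical helper: contentLength is 0 because an
// append blob starts empty, and the six optional parameter groups are left nil.
func createEmptyAppendBlob(ctx context.Context, client *AppendBlobClient) error {
	_, err := client.Create(ctx, 0, nil, nil, nil, nil, nil, nil)
	return err
}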
-func (client *AppendBlobClient) createCreateRequest(ctx context.Context, contentLength int64, options *AppendBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *AppendBlobClient) createCreateRequest(ctx context.Context, contentLength int64, options *AppendBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -440,7 +443,9 @@ func (client *AppendBlobClient) createCreateRequest(ctx context.Context, content } if options != nil && options.Metadata != nil { for k, v := range options.Metadata { - req.Raw().Header["x-ms-meta-"+k] = []string{v} + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } } } if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { @@ -462,10 +467,10 @@ func (client *AppendBlobClient) createCreateRequest(ctx context.Context, content req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -476,7 +481,7 @@ func (client *AppendBlobClient) createCreateRequest(ctx context.Context, content if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -484,7 +489,7 @@ func (client *AppendBlobClient) createCreateRequest(ctx context.Context, content req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} } if options != nil && options.ImmutabilityPolicyExpiry != nil { - req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{options.ImmutabilityPolicyExpiry.Format(time.RFC1123)} + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} } if options != nil && options.ImmutabilityPolicyMode != nil { req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} @@ -499,15 +504,8 @@ func (client *AppendBlobClient) createCreateRequest(ctx context.Context, content // createHandleResponse handles the Create response. 
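The metadata hunk above reflects a type change that runs through this regeneration: metadata values are now *string, and nil entries are skipped when the x-ms-meta-* headers are written. A minimal, self-contained sketch of building such a map with to.Ptr from the newly imported azcore/to package; the key/value pairs are made up.

package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
)

func main() {
	metadata := map[string]*string{
		"owner":   to.Ptr("loki"),
		"ignored": nil, // dropped by the generated nil check instead of being sent as an empty header
	}
	for k, v := range metadata {
		if v != nil {
			fmt.Printf("x-ms-meta-%s: %s\n", k, *v)
		}
	}
}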
func (client *AppendBlobClient) createHandleResponse(resp *http.Response) (AppendBlobClientCreateResponse, error) { result := AppendBlobClientCreateResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return AppendBlobClientCreateResponse{}, err - } - result.LastModified = &lastModified + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val } if val := resp.Header.Get("Content-MD5"); val != "" { contentMD5, err := base64.StdEncoding.DecodeString(val) @@ -516,18 +514,6 @@ func (client *AppendBlobClient) createHandleResponse(resp *http.Response) (Appen } result.ContentMD5 = contentMD5 } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("x-ms-version-id"); val != "" { - result.VersionID = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -535,6 +521,15 @@ func (client *AppendBlobClient) createHandleResponse(resp *http.Response) (Appen } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { isServerEncrypted, err := strconv.ParseBool(val) if err != nil { @@ -542,11 +537,21 @@ func (client *AppendBlobClient) createHandleResponse(resp *http.Response) (Appen } result.IsServerEncrypted = &isServerEncrypted } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientCreateResponse{}, err + } + result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val } return result, nil } @@ -554,25 +559,29 @@ func (client *AppendBlobClient) createHandleResponse(resp *http.Response) (Appen // Seal - The Seal operation seals the Append Blob to make it read-only. Seal is supported only on version 2019-12-12 version // or later. // If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// options - AppendBlobClientSealOptions contains the optional parameters for the AppendBlobClient.Seal method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
-// AppendPositionAccessConditions - AppendPositionAccessConditions contains a group of parameters for the AppendBlobClient.AppendBlock -// method. +// +// Generated from API version 2023-11-03 +// - options - AppendBlobClientSealOptions contains the optional parameters for the AppendBlobClient.Seal method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// - AppendPositionAccessConditions - AppendPositionAccessConditions contains a group of parameters for the AppendBlobClient.AppendBlock +// method. func (client *AppendBlobClient) Seal(ctx context.Context, options *AppendBlobClientSealOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions) (AppendBlobClientSealResponse, error) { + var err error req, err := client.sealCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions, appendPositionAccessConditions) if err != nil { return AppendBlobClientSealResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return AppendBlobClientSealResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return AppendBlobClientSealResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return AppendBlobClientSealResponse{}, err } - return client.sealHandleResponse(resp) + resp, err := client.sealHandleResponse(httpResp) + return resp, err } // sealCreateRequest creates the Seal request. @@ -587,7 +596,7 @@ func (client *AppendBlobClient) sealCreateRequest(ctx context.Context, options * reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -595,10 +604,10 @@ func (client *AppendBlobClient) sealCreateRequest(ctx context.Context, options * req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -616,25 +625,9 @@ func (client *AppendBlobClient) sealCreateRequest(ctx context.Context, options * // sealHandleResponse handles the Seal response. 
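A small usage sketch for Seal, again written as if it lived alongside the generated package; the helper name is hypothetical. Sealing makes the append blob read-only, and the handler below surfaces the x-ms-blob-sealed response header as IsSealed.

package generated

import "context"

// sealAppendBlob is a hypothetical helper: it seals the blob and reports whether
// the service confirmed the sealed state via the x-ms-blob-sealed header.
func sealAppendBlob(ctx context.Context, client *AppendBlobClient) (bool, error) {
	resp, err := client.Seal(ctx, nil, nil, nil, nil)
	if err != nil {
		return false, err
	}
	return resp.IsSealed != nil && *resp.IsSealed, nil
}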
func (client *AppendBlobClient) sealHandleResponse(resp *http.Response) (AppendBlobClientSealResponse, error) { result := AppendBlobClientSealResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return AppendBlobClientSealResponse{}, err - } - result.LastModified = &lastModified - } if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -642,6 +635,9 @@ func (client *AppendBlobClient) sealHandleResponse(resp *http.Response) (AppendB } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } if val := resp.Header.Get("x-ms-blob-sealed"); val != "" { isSealed, err := strconv.ParseBool(val) if err != nil { @@ -649,5 +645,18 @@ func (client *AppendBlobClient) sealHandleResponse(resp *http.Response) (AppendB } result.IsSealed = &isSealed } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientSealResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go index 713fb52794bfe..fe568a96c7ac5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go @@ -3,9 +3,8 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. // Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. package generated @@ -15,6 +14,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "net/http" "strconv" "strings" @@ -22,43 +22,36 @@ import ( ) // BlobClient contains the methods for the Blob group. -// Don't use this type directly, use NewBlobClient() instead. +// Don't use this type directly, use a constructor function instead. type BlobClient struct { + internal *azcore.Client endpoint string - pl runtime.Pipeline -} - -// NewBlobClient creates a new instance of BlobClient with the specified values. -// endpoint - The URL of the service account, container, or blob that is the target of the desired operation. -// pl - the pipeline used for sending requests and handling responses. 
-func NewBlobClient(endpoint string, pl runtime.Pipeline) *BlobClient { - client := &BlobClient{ - endpoint: endpoint, - pl: pl, - } - return client } // AbortCopyFromURL - The Abort Copy From URL operation aborts a pending Copy From URL operation, and leaves a destination // blob with zero length and full metadata. // If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// copyID - The copy identifier provided in the x-ms-copy-id header of the original Copy Blob operation. -// options - BlobClientAbortCopyFromURLOptions contains the optional parameters for the BlobClient.AbortCopyFromURL method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// +// Generated from API version 2023-11-03 +// - copyID - The copy identifier provided in the x-ms-copy-id header of the original Copy Blob operation. +// - options - BlobClientAbortCopyFromURLOptions contains the optional parameters for the BlobClient.AbortCopyFromURL method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. func (client *BlobClient) AbortCopyFromURL(ctx context.Context, copyID string, options *BlobClientAbortCopyFromURLOptions, leaseAccessConditions *LeaseAccessConditions) (BlobClientAbortCopyFromURLResponse, error) { + var err error req, err := client.abortCopyFromURLCreateRequest(ctx, copyID, options, leaseAccessConditions) if err != nil { return BlobClientAbortCopyFromURLResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientAbortCopyFromURLResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusNoContent) { - return BlobClientAbortCopyFromURLResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusNoContent) { + err = runtime.NewResponseError(httpResp) + return BlobClientAbortCopyFromURLResponse{}, err } - return client.abortCopyFromURLHandleResponse(resp) + resp, err := client.abortCopyFromURLHandleResponse(httpResp) + return resp, err } // abortCopyFromURLCreateRequest creates the AbortCopyFromURL request. 
@@ -78,7 +71,7 @@ func (client *BlobClient) abortCopyFromURLCreateRequest(ctx context.Context, cop if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -92,12 +85,6 @@ func (client *BlobClient) abortCopyFromURLHandleResponse(resp *http.Response) (B if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -105,31 +92,44 @@ func (client *BlobClient) abortCopyFromURLHandleResponse(resp *http.Response) (B } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } // AcquireLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations // If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// options - BlobClientAcquireLeaseOptions contains the optional parameters for the BlobClient.AcquireLease method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -func (client *BlobClient) AcquireLease(ctx context.Context, options *BlobClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientAcquireLeaseResponse, error) { - req, err := client.acquireLeaseCreateRequest(ctx, options, modifiedAccessConditions) +// +// Generated from API version 2023-11-03 +// - duration - Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite +// lease can be between 15 and 60 seconds. A lease duration cannot be changed using +// renew or change. +// - options - BlobClientAcquireLeaseOptions contains the optional parameters for the BlobClient.AcquireLease method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
+func (client *BlobClient) AcquireLease(ctx context.Context, duration int32, options *BlobClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientAcquireLeaseResponse, error) { + var err error + req, err := client.acquireLeaseCreateRequest(ctx, duration, options, modifiedAccessConditions) if err != nil { return BlobClientAcquireLeaseResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientAcquireLeaseResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return BlobClientAcquireLeaseResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return BlobClientAcquireLeaseResponse{}, err } - return client.acquireLeaseHandleResponse(resp) + resp, err := client.acquireLeaseHandleResponse(httpResp) + return resp, err } // acquireLeaseCreateRequest creates the AcquireLease request. -func (client *BlobClient) acquireLeaseCreateRequest(ctx context.Context, options *BlobClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *BlobClient) acquireLeaseCreateRequest(ctx context.Context, duration int32, options *BlobClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -141,17 +141,15 @@ func (client *BlobClient) acquireLeaseCreateRequest(ctx context.Context, options } req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["x-ms-lease-action"] = []string{"acquire"} - if options != nil && options.Duration != nil { - req.Raw().Header["x-ms-lease-duration"] = []string{strconv.FormatInt(int64(*options.Duration), 10)} - } + req.Raw().Header["x-ms-lease-duration"] = []string{strconv.FormatInt(int64(duration), 10)} if options != nil && options.ProposedLeaseID != nil { req.Raw().Header["x-ms-proposed-lease-id"] = []string{*options.ProposedLeaseID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -162,7 +160,7 @@ func (client *BlobClient) acquireLeaseCreateRequest(ctx context.Context, options if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -173,6 +171,16 @@ func (client *BlobClient) acquireLeaseCreateRequest(ctx context.Context, options // 
acquireLeaseHandleResponse handles the AcquireLease response. func (client *BlobClient) acquireLeaseHandleResponse(resp *http.Response) (BlobClientAcquireLeaseResponse, error) { result := BlobClientAcquireLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientAcquireLeaseResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -186,43 +194,37 @@ func (client *BlobClient) acquireLeaseHandleResponse(resp *http.Response) (BlobC if val := resp.Header.Get("x-ms-lease-id"); val != "" { result.LeaseID = &val } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientAcquireLeaseResponse{}, err - } - result.Date = &date - } return result, nil } // BreakLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations // If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// options - BlobClientBreakLeaseOptions contains the optional parameters for the BlobClient.BreakLease method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// +// Generated from API version 2023-11-03 +// - options - BlobClientBreakLeaseOptions contains the optional parameters for the BlobClient.BreakLease method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *BlobClient) BreakLease(ctx context.Context, options *BlobClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientBreakLeaseResponse, error) { + var err error req, err := client.breakLeaseCreateRequest(ctx, options, modifiedAccessConditions) if err != nil { return BlobClientBreakLeaseResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientBreakLeaseResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return BlobClientBreakLeaseResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return BlobClientBreakLeaseResponse{}, err } - return client.breakLeaseHandleResponse(resp) + resp, err := client.breakLeaseHandleResponse(httpResp) + return resp, err } // breakLeaseCreateRequest creates the BreakLease request. 
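The AcquireLease change above moves the lease duration from an optional field to a required int32 argument. A sketch of the new call shape, written as if inside the generated package with a hypothetical helper name; -1 asks for a lease that never expires, otherwise 15 to 60 seconds are accepted.

package generated

import "context"

// acquireInfiniteLease is a hypothetical helper: duration is now a required
// argument, and the lease ID comes back in the x-ms-lease-id header as resp.LeaseID.
func acquireInfiniteLease(ctx context.Context, client *BlobClient) (string, error) {
	resp, err := client.AcquireLease(ctx, -1, nil, nil)
	if err != nil {
		return "", err
	}
	if resp.LeaseID == nil {
		return "", nil
	}
	return *resp.LeaseID, nil
}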
@@ -242,10 +244,10 @@ func (client *BlobClient) breakLeaseCreateRequest(ctx context.Context, options * req.Raw().Header["x-ms-lease-break-period"] = []string{strconv.FormatInt(int64(*options.BreakPeriod), 10)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -256,7 +258,7 @@ func (client *BlobClient) breakLeaseCreateRequest(ctx context.Context, options * if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -267,6 +269,16 @@ func (client *BlobClient) breakLeaseCreateRequest(ctx context.Context, options * // breakLeaseHandleResponse handles the BreakLease response. func (client *BlobClient) breakLeaseHandleResponse(resp *http.Response) (BlobClientBreakLeaseResponse, error) { result := BlobClientBreakLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientBreakLeaseResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -285,47 +297,41 @@ func (client *BlobClient) breakLeaseHandleResponse(resp *http.Response) (BlobCli } result.LeaseTime = &leaseTime } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientBreakLeaseResponse{}, err - } - result.Date = &date - } return result, nil } // ChangeLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations // If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// leaseID - Specifies the current lease ID on the resource. -// proposedLeaseID - Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed -// lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID -// string formats. -// options - BlobClientChangeLeaseOptions contains the optional parameters for the BlobClient.ChangeLease method. 
-// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// +// Generated from API version 2023-11-03 +// - leaseID - Specifies the current lease ID on the resource. +// - proposedLeaseID - Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed +// lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID +// string formats. +// - options - BlobClientChangeLeaseOptions contains the optional parameters for the BlobClient.ChangeLease method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *BlobClient) ChangeLease(ctx context.Context, leaseID string, proposedLeaseID string, options *BlobClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientChangeLeaseResponse, error) { + var err error req, err := client.changeLeaseCreateRequest(ctx, leaseID, proposedLeaseID, options, modifiedAccessConditions) if err != nil { return BlobClientChangeLeaseResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientChangeLeaseResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientChangeLeaseResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientChangeLeaseResponse{}, err } - return client.changeLeaseHandleResponse(resp) + resp, err := client.changeLeaseHandleResponse(httpResp) + return resp, err } // changeLeaseCreateRequest creates the ChangeLease request. @@ -344,10 +350,10 @@ func (client *BlobClient) changeLeaseCreateRequest(ctx context.Context, leaseID req.Raw().Header["x-ms-lease-id"] = []string{leaseID} req.Raw().Header["x-ms-proposed-lease-id"] = []string{proposedLeaseID} if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -358,7 +364,7 @@ func (client *BlobClient) changeLeaseCreateRequest(ctx context.Context, leaseID if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -369,6 +375,16 @@ func (client *BlobClient) changeLeaseCreateRequest(ctx context.Context, leaseID // changeLeaseHandleResponse handles the ChangeLease response. 
func (client *BlobClient) changeLeaseHandleResponse(resp *http.Response) (BlobClientChangeLeaseResponse, error) { result := BlobClientChangeLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientChangeLeaseResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -379,57 +395,52 @@ func (client *BlobClient) changeLeaseHandleResponse(resp *http.Response) (BlobCl } result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val + if val := resp.Header.Get("x-ms-lease-id"); val != "" { + result.LeaseID = &val } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } - if val := resp.Header.Get("x-ms-lease-id"); val != "" { - result.LeaseID = &val - } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientChangeLeaseResponse{}, err - } - result.Date = &date - } return result, nil } // CopyFromURL - The Copy From URL operation copies a blob or an internet resource to a new blob. It will not return a response // until the copy is complete. // If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies -// a page blob snapshot. The value should be URL-encoded as it would appear in a request -// URI. The source blob must either be public or must be authenticated via a shared access signature. -// options - BlobClientCopyFromURLOptions contains the optional parameters for the BlobClient.CopyFromURL method. -// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL -// method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -func (client *BlobClient) CopyFromURL(ctx context.Context, copySource string, options *BlobClientCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientCopyFromURLResponse, error) { - req, err := client.copyFromURLCreateRequest(ctx, copySource, options, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions) +// +// Generated from API version 2023-11-03 +// - copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies +// a page blob snapshot. The value should be URL-encoded as it would appear in a request +// URI. The source blob must either be public or must be authenticated via a shared access signature. +// - options - BlobClientCopyFromURLOptions contains the optional parameters for the BlobClient.CopyFromURL method. +// - SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL +// method. 
+// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +func (client *BlobClient) CopyFromURL(ctx context.Context, copySource string, options *BlobClientCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions, cpkScopeInfo *CPKScopeInfo) (BlobClientCopyFromURLResponse, error) { + var err error + req, err := client.copyFromURLCreateRequest(ctx, copySource, options, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions, cpkScopeInfo) if err != nil { return BlobClientCopyFromURLResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientCopyFromURLResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return BlobClientCopyFromURLResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return BlobClientCopyFromURLResponse{}, err } - return client.copyFromURLHandleResponse(resp) + resp, err := client.copyFromURLHandleResponse(httpResp) + return resp, err } // copyFromURLCreateRequest creates the CopyFromURL request. -func (client *BlobClient) copyFromURLCreateRequest(ctx context.Context, copySource string, options *BlobClientCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { +func (client *BlobClient) copyFromURLCreateRequest(ctx context.Context, copySource string, options *BlobClientCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions, cpkScopeInfo *CPKScopeInfo) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -442,17 +453,19 @@ func (client *BlobClient) copyFromURLCreateRequest(ctx context.Context, copySour req.Raw().Header["x-ms-requires-sync"] = []string{"true"} if options != nil && options.Metadata != nil { for k, v := range options.Metadata { - req.Raw().Header["x-ms-meta-"+k] = []string{v} + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } } } if options != nil && options.Tier != nil { req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { - req.Raw().Header["x-ms-source-if-modified-since"] = []string{sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { - req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["x-ms-source-if-unmodified-since"] = 
[]string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} @@ -461,10 +474,10 @@ func (client *BlobClient) copyFromURLCreateRequest(ctx context.Context, copySour req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -479,7 +492,7 @@ func (client *BlobClient) copyFromURLCreateRequest(ctx context.Context, copySour if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -490,7 +503,7 @@ func (client *BlobClient) copyFromURLCreateRequest(ctx context.Context, copySour req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} } if options != nil && options.ImmutabilityPolicyExpiry != nil { - req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{options.ImmutabilityPolicyExpiry.Format(time.RFC1123)} + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} } if options != nil && options.ImmutabilityPolicyMode != nil { req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} @@ -501,6 +514,12 @@ func (client *BlobClient) copyFromURLCreateRequest(ctx context.Context, copySour if options != nil && options.CopySourceAuthorization != nil { req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if options != nil && options.CopySourceTags != nil { + req.Raw().Header["x-ms-copy-source-tag-option"] = []string{string(*options.CopySourceTags)} + } req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } @@ -508,34 +527,22 @@ func (client *BlobClient) copyFromURLCreateRequest(ctx context.Context, copySour // copyFromURLHandleResponse handles the CopyFromURL response. 
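CopyFromURL now threads a trailing *CPKScopeInfo through to the x-ms-encryption-scope header (and gains the x-ms-copy-source-tag-option header). A sketch of the new call shape, written as if inside the generated package; the helper name and arguments are made up.

package generated

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
)

// copyWithEncryptionScope is a hypothetical helper showing the extra CPKScopeInfo
// parameter appended to CopyFromURL in this regeneration.
func copyWithEncryptionScope(ctx context.Context, client *BlobClient, sourceURL, scope string) error {
	cpkScope := &CPKScopeInfo{EncryptionScope: to.Ptr(scope)}
	_, err := client.CopyFromURL(ctx, sourceURL, nil, nil, nil, nil, cpkScope)
	return err
}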
func (client *BlobClient) copyFromURLHandleResponse(resp *http.Response) (BlobClientCopyFromURLResponse, error) { result := BlobClientCopyFromURLResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) if err != nil { return BlobClientCopyFromURLResponse{}, err } - result.LastModified = &lastModified - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("x-ms-version-id"); val != "" { - result.VersionID = &val + result.ContentCRC64 = contentCRC64 } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) if err != nil { return BlobClientCopyFromURLResponse{}, err } - result.Date = &date + result.ContentMD5 = contentMD5 } if val := resp.Header.Get("x-ms-copy-id"); val != "" { result.CopyID = &val @@ -543,48 +550,67 @@ func (client *BlobClient) copyFromURLHandleResponse(resp *http.Response) (BlobCl if val := resp.Header.Get("x-ms-copy-status"); val != "" { result.CopyStatus = &val } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) if err != nil { return BlobClientCopyFromURLResponse{}, err } - result.ContentMD5 = contentMD5 + result.Date = &date } - if val := resp.Header.Get("x-ms-content-crc64"); val != "" { - xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) if err != nil { return BlobClientCopyFromURLResponse{}, err } - result.XMSContentCRC64 = xMSContentCRC64 + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val } return result, nil } // CreateSnapshot - The Create Snapshot operation creates a read-only snapshot of a blob // If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// options - BlobClientCreateSnapshotOptions contains the optional parameters for the BlobClient.CreateSnapshot method. -// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. -// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
-// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -func (client *BlobClient) CreateSnapshot(ctx context.Context, options *BlobClientCreateSnapshotOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientCreateSnapshotResponse, error) { +// +// Generated from API version 2023-11-03 +// - options - BlobClientCreateSnapshotOptions contains the optional parameters for the BlobClient.CreateSnapshot method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +func (client *BlobClient) CreateSnapshot(ctx context.Context, options *BlobClientCreateSnapshotOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientCreateSnapshotResponse, error) { + var err error req, err := client.createSnapshotCreateRequest(ctx, options, cpkInfo, cpkScopeInfo, modifiedAccessConditions, leaseAccessConditions) if err != nil { return BlobClientCreateSnapshotResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientCreateSnapshotResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return BlobClientCreateSnapshotResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return BlobClientCreateSnapshotResponse{}, err } - return client.createSnapshotHandleResponse(resp) + resp, err := client.createSnapshotHandleResponse(httpResp) + return resp, err } // createSnapshotCreateRequest creates the CreateSnapshot request. 
-func (client *BlobClient) createSnapshotCreateRequest(ctx context.Context, options *BlobClientCreateSnapshotOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { +func (client *BlobClient) createSnapshotCreateRequest(ctx context.Context, options *BlobClientCreateSnapshotOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -597,7 +623,9 @@ func (client *BlobClient) createSnapshotCreateRequest(ctx context.Context, optio req.Raw().URL.RawQuery = reqQP.Encode() if options != nil && options.Metadata != nil { for k, v := range options.Metadata { - req.Raw().Header["x-ms-meta-"+k] = []string{v} + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } } } if cpkInfo != nil && cpkInfo.EncryptionKey != nil { @@ -613,10 +641,10 @@ func (client *BlobClient) createSnapshotCreateRequest(ctx context.Context, optio req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -630,7 +658,7 @@ func (client *BlobClient) createSnapshotCreateRequest(ctx context.Context, optio if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -641,12 +669,26 @@ func (client *BlobClient) createSnapshotCreateRequest(ctx context.Context, optio // createSnapshotHandleResponse handles the CreateSnapshot response. 
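A usage sketch for CreateSnapshot, written as if inside the generated package with a hypothetical helper name: the snapshot timestamp is returned in the x-ms-snapshot header and surfaced as resp.Snapshot.

package generated

import "context"

// snapshotBlob is a hypothetical helper: it takes a read-only snapshot and returns
// the snapshot timestamp reported in the x-ms-snapshot header (empty if absent).
func snapshotBlob(ctx context.Context, client *BlobClient) (string, error) {
	resp, err := client.CreateSnapshot(ctx, nil, nil, nil, nil, nil)
	if err != nil {
		return "", err
	}
	if resp.Snapshot == nil {
		return "", nil
	}
	return *resp.Snapshot, nil
}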
func (client *BlobClient) createSnapshotHandleResponse(resp *http.Response) (BlobClientCreateSnapshotResponse, error) { result := BlobClientCreateSnapshotResponse{} - if val := resp.Header.Get("x-ms-snapshot"); val != "" { - result.Snapshot = &val + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientCreateSnapshotResponse{}, err + } + result.Date = &date } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return BlobClientCreateSnapshotResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } if val := resp.Header.Get("Last-Modified"); val != "" { lastModified, err := time.Parse(time.RFC1123, val) if err != nil { @@ -654,32 +696,18 @@ func (client *BlobClient) createSnapshotHandleResponse(resp *http.Response) (Blo } result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } + if val := resp.Header.Get("x-ms-snapshot"); val != "" { + result.Snapshot = &val + } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } if val := resp.Header.Get("x-ms-version-id"); val != "" { result.VersionID = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientCreateSnapshotResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { - isServerEncrypted, err := strconv.ParseBool(val) - if err != nil { - return BlobClientCreateSnapshotResponse{}, err - } - result.IsServerEncrypted = &isServerEncrypted - } return result, nil } @@ -695,23 +723,27 @@ func (client *BlobClient) createSnapshotHandleResponse(resp *http.Response) (Blo // All other operations on a soft-deleted blob or snapshot causes the service to // return an HTTP status code of 404 (ResourceNotFound). // If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// options - BlobClientDeleteOptions contains the optional parameters for the BlobClient.Delete method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// +// Generated from API version 2023-11-03 +// - options - BlobClientDeleteOptions contains the optional parameters for the BlobClient.Delete method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
func (client *BlobClient) Delete(ctx context.Context, options *BlobClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientDeleteResponse, error) { + var err error req, err := client.deleteCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) if err != nil { return BlobClientDeleteResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientDeleteResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return BlobClientDeleteResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return BlobClientDeleteResponse{}, err } - return client.deleteHandleResponse(resp) + resp, err := client.deleteHandleResponse(httpResp) + return resp, err } // deleteCreateRequest creates the Delete request. @@ -741,10 +773,10 @@ func (client *BlobClient) deleteCreateRequest(ctx context.Context, options *Blob req.Raw().Header["x-ms-delete-snapshots"] = []string{string(*options.DeleteSnapshots)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -755,7 +787,7 @@ func (client *BlobClient) deleteCreateRequest(ctx context.Context, options *Blob if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -769,12 +801,6 @@ func (client *BlobClient) deleteHandleResponse(resp *http.Response) (BlobClientD if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -782,27 +808,37 @@ func (client *BlobClient) deleteHandleResponse(resp *http.Response) (BlobClientD } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } // DeleteImmutabilityPolicy - The Delete Immutability Policy operation deletes the immutability policy on the blob // If the operation fails it returns an *azcore.ResponseError type. 
-// Generated from API version 2020-10-02 -// options - BlobClientDeleteImmutabilityPolicyOptions contains the optional parameters for the BlobClient.DeleteImmutabilityPolicy -// method. +// +// Generated from API version 2023-11-03 +// - options - BlobClientDeleteImmutabilityPolicyOptions contains the optional parameters for the BlobClient.DeleteImmutabilityPolicy +// method. func (client *BlobClient) DeleteImmutabilityPolicy(ctx context.Context, options *BlobClientDeleteImmutabilityPolicyOptions) (BlobClientDeleteImmutabilityPolicyResponse, error) { + var err error req, err := client.deleteImmutabilityPolicyCreateRequest(ctx, options) if err != nil { return BlobClientDeleteImmutabilityPolicyResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientDeleteImmutabilityPolicyResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientDeleteImmutabilityPolicyResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientDeleteImmutabilityPolicyResponse{}, err } - return client.deleteImmutabilityPolicyHandleResponse(resp) + resp, err := client.deleteImmutabilityPolicyHandleResponse(httpResp) + return resp, err } // deleteImmutabilityPolicyCreateRequest creates the DeleteImmutabilityPolicy request. @@ -817,7 +853,7 @@ func (client *BlobClient) deleteImmutabilityPolicyCreateRequest(ctx context.Cont reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -831,12 +867,6 @@ func (client *BlobClient) deleteImmutabilityPolicyHandleResponse(resp *http.Resp if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -844,34 +874,44 @@ func (client *BlobClient) deleteImmutabilityPolicyHandleResponse(resp *http.Resp } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } // Download - The Download operation reads or downloads a blob from the system, including its metadata and properties. You // can also call Download to read a snapshot. // If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// options - BlobClientDownloadOptions contains the optional parameters for the BlobClient.Download method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
-func (client *BlobClient) Download(ctx context.Context, options *BlobClientDownloadOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientDownloadResponse, error) { +// +// Generated from API version 2023-11-03 +// - options - BlobClientDownloadOptions contains the optional parameters for the BlobClient.Download method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlobClient) Download(ctx context.Context, options *BlobClientDownloadOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientDownloadResponse, error) { + var err error req, err := client.downloadCreateRequest(ctx, options, leaseAccessConditions, cpkInfo, modifiedAccessConditions) if err != nil { return BlobClientDownloadResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientDownloadResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusPartialContent, http.StatusNotModified) { - return BlobClientDownloadResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK, http.StatusPartialContent, http.StatusNotModified) { + err = runtime.NewResponseError(httpResp) + return BlobClientDownloadResponse{}, err } - return client.downloadHandleResponse(resp) + resp, err := client.downloadHandleResponse(httpResp) + return resp, err } // downloadCreateRequest creates the Download request. 
-func (client *BlobClient) downloadCreateRequest(ctx context.Context, options *BlobClientDownloadOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *BlobClient) downloadCreateRequest(ctx context.Context, options *BlobClientDownloadOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) if err != nil { return nil, err @@ -910,10 +950,10 @@ func (client *BlobClient) downloadCreateRequest(ctx context.Context, options *Bl req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -924,7 +964,7 @@ func (client *BlobClient) downloadCreateRequest(ctx context.Context, options *Bl if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -935,76 +975,75 @@ func (client *BlobClient) downloadCreateRequest(ctx context.Context, options *Bl // downloadHandleResponse handles the Download response. 
func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClientDownloadResponse, error) { result := BlobClientDownloadResponse{Body: resp.Body} - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("Accept-Ranges"); val != "" { + result.AcceptRanges = &val + } + if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { + blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) + blobCommittedBlockCount := int32(blobCommittedBlockCount32) if err != nil { return BlobClientDownloadResponse{}, err } - result.LastModified = &lastModified - } - for hh := range resp.Header { - if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { - if result.Metadata == nil { - result.Metadata = map[string]string{} - } - result.Metadata[hh[len("x-ms-meta-"):]] = resp.Header.Get(hh) - } - } - if val := resp.Header.Get("x-ms-or-policy-id"); val != "" { - result.ObjectReplicationPolicyID = &val + result.BlobCommittedBlockCount = &blobCommittedBlockCount } - for hh := range resp.Header { - if len(hh) > len("x-ms-or-") && strings.EqualFold(hh[:len("x-ms-or-")], "x-ms-or-") { - if result.Metadata == nil { - result.Metadata = map[string]string{} - } - result.Metadata[hh[len("x-ms-or-"):]] = resp.Header.Get(hh) + if val := resp.Header.Get("x-ms-blob-content-md5"); val != "" { + blobContentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlobClientDownloadResponse{}, err } + result.BlobContentMD5 = blobContentMD5 } - if val := resp.Header.Get("Content-Length"); val != "" { - contentLength, err := strconv.ParseInt(val, 10, 64) + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) if err != nil { return BlobClientDownloadResponse{}, err } - result.ContentLength = &contentLength + result.BlobSequenceNumber = &blobSequenceNumber } - if val := resp.Header.Get("Content-Type"); val != "" { - result.ContentType = &val + if val := resp.Header.Get("x-ms-blob-type"); val != "" { + result.BlobType = (*BlobType)(&val) } - if val := resp.Header.Get("Content-Range"); val != "" { - result.ContentRange = &val + if val := resp.Header.Get("Cache-Control"); val != "" { + result.CacheControl = &val } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) if err != nil { return BlobClientDownloadResponse{}, err } - result.ContentMD5 = contentMD5 - } - if val := resp.Header.Get("Content-Encoding"); val != "" { - result.ContentEncoding = &val - } - if val := resp.Header.Get("Cache-Control"); val != "" { - result.CacheControl = &val + result.ContentCRC64 = contentCRC64 } if val := resp.Header.Get("Content-Disposition"); val != "" { result.ContentDisposition = &val } + if val := resp.Header.Get("Content-Encoding"); val != "" { + result.ContentEncoding = &val + } if val := resp.Header.Get("Content-Language"); val != "" { result.ContentLanguage = &val } - if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { - blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if val := resp.Header.Get("Content-Length"); 
val != "" {
+		contentLength, err := strconv.ParseInt(val, 10, 64)
 		if err != nil {
 			return BlobClientDownloadResponse{}, err
 		}
-		result.BlobSequenceNumber = &blobSequenceNumber
+		result.ContentLength = &contentLength
 	}
-	if val := resp.Header.Get("x-ms-blob-type"); val != "" {
-		result.BlobType = (*BlobType)(&val)
+	if val := resp.Header.Get("Content-MD5"); val != "" {
+		contentMD5, err := base64.StdEncoding.DecodeString(val)
+		if err != nil {
+			return BlobClientDownloadResponse{}, err
+		}
+		result.ContentMD5 = contentMD5
+	}
+	if val := resp.Header.Get("Content-Range"); val != "" {
+		result.ContentRange = &val
+	}
+	if val := resp.Header.Get("Content-Type"); val != "" {
+		result.ContentType = &val
 	}
 	if val := resp.Header.Get("x-ms-copy-completion-time"); val != "" {
 		copyCompletionTime, err := time.Parse(time.RFC1123, val)
@@ -1013,9 +1052,6 @@ func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClien
 		}
 		result.CopyCompletionTime = &copyCompletionTime
 	}
-	if val := resp.Header.Get("x-ms-copy-status-description"); val != "" {
-		result.CopyStatusDescription = &val
-	}
 	if val := resp.Header.Get("x-ms-copy-id"); val != "" {
 		result.CopyID = &val
 	}
@@ -1028,36 +1064,15 @@ func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClien
 	if val := resp.Header.Get("x-ms-copy-status"); val != "" {
 		result.CopyStatus = (*CopyStatusType)(&val)
 	}
-	if val := resp.Header.Get("x-ms-lease-duration"); val != "" {
-		result.LeaseDuration = (*LeaseDurationType)(&val)
-	}
-	if val := resp.Header.Get("x-ms-lease-state"); val != "" {
-		result.LeaseState = (*LeaseStateType)(&val)
-	}
-	if val := resp.Header.Get("x-ms-lease-status"); val != "" {
-		result.LeaseStatus = (*LeaseStatusType)(&val)
-	}
-	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
-		result.ClientRequestID = &val
-	}
-	if val := resp.Header.Get("x-ms-request-id"); val != "" {
-		result.RequestID = &val
-	}
-	if val := resp.Header.Get("x-ms-version"); val != "" {
-		result.Version = &val
-	}
-	if val := resp.Header.Get("x-ms-version-id"); val != "" {
-		result.VersionID = &val
+	if val := resp.Header.Get("x-ms-copy-status-description"); val != "" {
+		result.CopyStatusDescription = &val
 	}
-	if val := resp.Header.Get("x-ms-is-current-version"); val != "" {
-		isCurrentVersion, err := strconv.ParseBool(val)
+	if val := resp.Header.Get("x-ms-creation-time"); val != "" {
+		creationTime, err := time.Parse(time.RFC1123, val)
 		if err != nil {
 			return BlobClientDownloadResponse{}, err
 		}
-		result.IsCurrentVersion = &isCurrentVersion
-	}
-	if val := resp.Header.Get("Accept-Ranges"); val != "" {
-		result.AcceptRanges = &val
+		result.CreationTime = &creationTime
 	}
 	if val := resp.Header.Get("Date"); val != "" {
 		date, err := time.Parse(time.RFC1123, val)
@@ -1066,20 +1081,8 @@ func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClien
 		}
 		result.Date = &date
 	}
-	if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" {
-		blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32)
-		blobCommittedBlockCount := int32(blobCommittedBlockCount32)
-		if err != nil {
-			return BlobClientDownloadResponse{}, err
-		}
-		result.BlobCommittedBlockCount = &blobCommittedBlockCount
-	}
-	if val := resp.Header.Get("x-ms-server-encrypted"); val != "" {
-		isServerEncrypted, err := strconv.ParseBool(val)
-		if err != nil {
-			return BlobClientDownloadResponse{}, err
-		}
-		result.IsServerEncrypted = &isServerEncrypted
+	if val := resp.Header.Get("ETag"); val != "" {
+		result.ETag = (*azcore.ETag)(&val)
 	}
 	if val 
:= resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { result.EncryptionKeySHA256 = &val @@ -1087,19 +1090,25 @@ func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClien if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { result.EncryptionScope = &val } - if val := resp.Header.Get("x-ms-blob-content-md5"); val != "" { - blobContentMD5, err := base64.StdEncoding.DecodeString(val) + if val := resp.Header.Get("x-ms-error-code"); val != "" { + result.ErrorCode = &val + } + if val := resp.Header.Get("x-ms-immutability-policy-until-date"); val != "" { + immutabilityPolicyExpiresOn, err := time.Parse(time.RFC1123, val) if err != nil { return BlobClientDownloadResponse{}, err } - result.BlobContentMD5 = blobContentMD5 + result.ImmutabilityPolicyExpiresOn = &immutabilityPolicyExpiresOn } - if val := resp.Header.Get("x-ms-tag-count"); val != "" { - tagCount, err := strconv.ParseInt(val, 10, 64) + if val := resp.Header.Get("x-ms-immutability-policy-mode"); val != "" { + result.ImmutabilityPolicyMode = (*ImmutabilityPolicyMode)(&val) + } + if val := resp.Header.Get("x-ms-is-current-version"); val != "" { + isCurrentVersion, err := strconv.ParseBool(val) if err != nil { return BlobClientDownloadResponse{}, err } - result.TagCount = &tagCount + result.IsCurrentVersion = &isCurrentVersion } if val := resp.Header.Get("x-ms-blob-sealed"); val != "" { isSealed, err := strconv.ParseBool(val) @@ -1108,6 +1117,13 @@ func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClien } result.IsSealed = &isSealed } + if val := resp.Header.Get("x-ms-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return BlobClientDownloadResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } if val := resp.Header.Get("x-ms-last-access-time"); val != "" { lastAccessed, err := time.Parse(time.RFC1123, val) if err != nil { @@ -1115,15 +1131,21 @@ func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClien } result.LastAccessed = &lastAccessed } - if val := resp.Header.Get("x-ms-immutability-policy-until-date"); val != "" { - immutabilityPolicyExpiresOn, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) if err != nil { return BlobClientDownloadResponse{}, err } - result.ImmutabilityPolicyExpiresOn = &immutabilityPolicyExpiresOn + result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-immutability-policy-mode"); val != "" { - result.ImmutabilityPolicyMode = (*ImmutabilityPolicyMode)(&val) + if val := resp.Header.Get("x-ms-lease-duration"); val != "" { + result.LeaseDuration = (*LeaseDurationType)(&val) + } + if val := resp.Header.Get("x-ms-lease-state"); val != "" { + result.LeaseState = (*LeaseStateType)(&val) + } + if val := resp.Header.Get("x-ms-lease-status"); val != "" { + result.LeaseStatus = (*LeaseStatusType)(&val) } if val := resp.Header.Get("x-ms-legal-hold"); val != "" { legalHold, err := strconv.ParseBool(val) @@ -1132,36 +1154,65 @@ func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClien } result.LegalHold = &legalHold } - if val := resp.Header.Get("x-ms-content-crc64"); val != "" { - contentCRC64, err := base64.StdEncoding.DecodeString(val) + for hh := range resp.Header { + if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { + if result.Metadata == nil { + result.Metadata = map[string]*string{} + 
} + result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh)) + } + } + if val := resp.Header.Get("x-ms-or-policy-id"); val != "" { + result.ObjectReplicationPolicyID = &val + } + for hh := range resp.Header { + if len(hh) > len("x-ms-or-") && strings.EqualFold(hh[:len("x-ms-or-")], "x-ms-or-") { + if result.Metadata == nil { + result.Metadata = map[string]*string{} + } + result.Metadata[hh[len("x-ms-or-"):]] = to.Ptr(resp.Header.Get(hh)) + } + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-tag-count"); val != "" { + tagCount, err := strconv.ParseInt(val, 10, 64) if err != nil { return BlobClientDownloadResponse{}, err } - result.ContentCRC64 = contentCRC64 + result.TagCount = &tagCount } - if val := resp.Header.Get("x-ms-error-code"); val != "" { - result.ErrorCode = &val + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val } return result, nil } // GetAccountInfo - Returns the sku name and account kind // If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// options - BlobClientGetAccountInfoOptions contains the optional parameters for the BlobClient.GetAccountInfo method. +// +// Generated from API version 2023-11-03 +// - options - BlobClientGetAccountInfoOptions contains the optional parameters for the BlobClient.GetAccountInfo method. func (client *BlobClient) GetAccountInfo(ctx context.Context, options *BlobClientGetAccountInfoOptions) (BlobClientGetAccountInfoResponse, error) { + var err error req, err := client.getAccountInfoCreateRequest(ctx, options) if err != nil { return BlobClientGetAccountInfoResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientGetAccountInfoResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientGetAccountInfoResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientGetAccountInfoResponse{}, err } - return client.getAccountInfoHandleResponse(resp) + resp, err := client.getAccountInfoHandleResponse(httpResp) + return resp, err } // getAccountInfoCreateRequest creates the GetAccountInfo request. @@ -1174,7 +1225,7 @@ func (client *BlobClient) getAccountInfoCreateRequest(ctx context.Context, optio reqQP.Set("restype", "account") reqQP.Set("comp", "properties") req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } @@ -1182,15 +1233,12 @@ func (client *BlobClient) getAccountInfoCreateRequest(ctx context.Context, optio // getAccountInfoHandleResponse handles the GetAccountInfo response. 
func (client *BlobClient) getAccountInfoHandleResponse(resp *http.Response) (BlobClientGetAccountInfoResponse, error) { result := BlobClientGetAccountInfoResponse{} + if val := resp.Header.Get("x-ms-account-kind"); val != "" { + result.AccountKind = (*AccountKind)(&val) + } if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -1198,11 +1246,14 @@ func (client *BlobClient) getAccountInfoHandleResponse(resp *http.Response) (Blo } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } if val := resp.Header.Get("x-ms-sku-name"); val != "" { result.SKUName = (*SKUName)(&val) } - if val := resp.Header.Get("x-ms-account-kind"); val != "" { - result.AccountKind = (*AccountKind)(&val) + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val } return result, nil } @@ -1210,28 +1261,32 @@ func (client *BlobClient) getAccountInfoHandleResponse(resp *http.Response) (Blo // GetProperties - The Get Properties operation returns all user-defined metadata, standard HTTP properties, and system properties // for the blob. It does not return the content of the blob. // If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// options - BlobClientGetPropertiesOptions contains the optional parameters for the BlobClient.GetProperties method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -func (client *BlobClient) GetProperties(ctx context.Context, options *BlobClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientGetPropertiesResponse, error) { +// +// Generated from API version 2023-11-03 +// - options - BlobClientGetPropertiesOptions contains the optional parameters for the BlobClient.GetProperties method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
+func (client *BlobClient) GetProperties(ctx context.Context, options *BlobClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientGetPropertiesResponse, error) { + var err error req, err := client.getPropertiesCreateRequest(ctx, options, leaseAccessConditions, cpkInfo, modifiedAccessConditions) if err != nil { return BlobClientGetPropertiesResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientGetPropertiesResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientGetPropertiesResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientGetPropertiesResponse{}, err } - return client.getPropertiesHandleResponse(resp) + resp, err := client.getPropertiesHandleResponse(httpResp) + return resp, err } // getPropertiesCreateRequest creates the GetProperties request. -func (client *BlobClient) getPropertiesCreateRequest(ctx context.Context, options *BlobClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *BlobClient) getPropertiesCreateRequest(ctx context.Context, options *BlobClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodHead, client.endpoint) if err != nil { return nil, err @@ -1260,10 +1315,10 @@ func (client *BlobClient) getPropertiesCreateRequest(ctx context.Context, option req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -1274,7 +1329,7 @@ func (client *BlobClient) getPropertiesCreateRequest(ctx context.Context, option if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -1285,82 +1340,61 @@ func (client *BlobClient) getPropertiesCreateRequest(ctx context.Context, option // getPropertiesHandleResponse handles the GetProperties response. 
func (client *BlobClient) getPropertiesHandleResponse(resp *http.Response) (BlobClientGetPropertiesResponse, error) {
 	result := BlobClientGetPropertiesResponse{}
-	if val := resp.Header.Get("Last-Modified"); val != "" {
-		lastModified, err := time.Parse(time.RFC1123, val)
+	if val := resp.Header.Get("Accept-Ranges"); val != "" {
+		result.AcceptRanges = &val
+	}
+	if val := resp.Header.Get("x-ms-access-tier"); val != "" {
+		result.AccessTier = &val
+	}
+	if val := resp.Header.Get("x-ms-access-tier-change-time"); val != "" {
+		accessTierChangeTime, err := time.Parse(time.RFC1123, val)
 		if err != nil {
 			return BlobClientGetPropertiesResponse{}, err
 		}
-		result.LastModified = &lastModified
+		result.AccessTierChangeTime = &accessTierChangeTime
 	}
-	if val := resp.Header.Get("x-ms-creation-time"); val != "" {
-		creationTime, err := time.Parse(time.RFC1123, val)
+	if val := resp.Header.Get("x-ms-access-tier-inferred"); val != "" {
+		accessTierInferred, err := strconv.ParseBool(val)
 		if err != nil {
 			return BlobClientGetPropertiesResponse{}, err
 		}
-		result.CreationTime = &creationTime
-	}
-	for hh := range resp.Header {
-		if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") {
-			if result.Metadata == nil {
-				result.Metadata = map[string]string{}
-			}
-			result.Metadata[hh[len("x-ms-meta-"):]] = resp.Header.Get(hh)
-		}
-	}
-	if val := resp.Header.Get("x-ms-or-policy-id"); val != "" {
-		result.ObjectReplicationPolicyID = &val
-	}
-	for hh := range resp.Header {
-		if len(hh) > len("x-ms-or-") && strings.EqualFold(hh[:len("x-ms-or-")], "x-ms-or-") {
-			if result.Metadata == nil {
-				result.Metadata = map[string]string{}
-			}
-			result.Metadata[hh[len("x-ms-or-"):]] = resp.Header.Get(hh)
-		}
+		result.AccessTierInferred = &accessTierInferred
 	}
-	if val := resp.Header.Get("x-ms-blob-type"); val != "" {
-		result.BlobType = (*BlobType)(&val)
+	if val := resp.Header.Get("x-ms-archive-status"); val != "" {
+		result.ArchiveStatus = &val
 	}
-	if val := resp.Header.Get("x-ms-copy-completion-time"); val != "" {
-		copyCompletionTime, err := time.Parse(time.RFC1123, val)
+	if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" {
+		blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32)
+		blobCommittedBlockCount := int32(blobCommittedBlockCount32)
 		if err != nil {
 			return BlobClientGetPropertiesResponse{}, err
 		}
-		result.CopyCompletionTime = &copyCompletionTime
-	}
-	if val := resp.Header.Get("x-ms-copy-status-description"); val != "" {
-		result.CopyStatusDescription = &val
-	}
-	if val := resp.Header.Get("x-ms-copy-id"); val != "" {
-		result.CopyID = &val
-	}
-	if val := resp.Header.Get("x-ms-copy-progress"); val != "" {
-		result.CopyProgress = &val
-	}
-	if val := resp.Header.Get("x-ms-copy-source"); val != "" {
-		result.CopySource = &val
-	}
-	if val := resp.Header.Get("x-ms-copy-status"); val != "" {
-		result.CopyStatus = (*CopyStatusType)(&val)
+		result.BlobCommittedBlockCount = &blobCommittedBlockCount
 	}
-	if val := resp.Header.Get("x-ms-incremental-copy"); val != "" {
-		isIncrementalCopy, err := strconv.ParseBool(val)
+	if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" {
+		blobSequenceNumber, err := strconv.ParseInt(val, 10, 64)
 		if err != nil {
 			return BlobClientGetPropertiesResponse{}, err
 		}
-		result.IsIncrementalCopy = &isIncrementalCopy
+		result.BlobSequenceNumber = &blobSequenceNumber
 	}
-	if val := resp.Header.Get("x-ms-copy-destination-snapshot"); val != "" {
-		result.DestinationSnapshot = &val
+	if val := resp.Header.Get("x-ms-blob-type"); val != "" {
+		result.BlobType = (*BlobType)(&val)
 	}
-	if val := resp.Header.Get("x-ms-lease-duration"); val != "" {
-		result.LeaseDuration = (*LeaseDurationType)(&val)
+	if val := resp.Header.Get("Cache-Control"); val != "" {
+		result.CacheControl = &val
 	}
-	if val := resp.Header.Get("x-ms-lease-state"); val != "" {
-		result.LeaseState = (*LeaseStateType)(&val)
+	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+		result.ClientRequestID = &val
 	}
-	if val := resp.Header.Get("x-ms-lease-status"); val != "" {
-		result.LeaseStatus = (*LeaseStatusType)(&val)
+	if val := resp.Header.Get("Content-Disposition"); val != "" {
+		result.ContentDisposition = &val
+	}
+	if val := resp.Header.Get("Content-Encoding"); val != "" {
+		result.ContentEncoding = &val
+	}
+	if val := resp.Header.Get("Content-Language"); val != "" {
+		result.ContentLanguage = &val
 	}
 	if val := resp.Header.Get("Content-Length"); val != "" {
 		contentLength, err := strconv.ParseInt(val, 10, 64)
@@ -1369,12 +1403,6 @@ func (client *BlobClient) getPropertiesHandleResponse(resp *http.Response) (Blob
 		}
 		result.ContentLength = &contentLength
 	}
-	if val := resp.Header.Get("Content-Type"); val != "" {
-		result.ContentType = &val
-	}
-	if val := resp.Header.Get("ETag"); val != "" {
-		result.ETag = (*azcore.ETag)(&val)
-	}
 	if val := resp.Header.Get("Content-MD5"); val != "" {
 		contentMD5, err := base64.StdEncoding.DecodeString(val)
 		if err != nil {
@@ -1382,33 +1410,37 @@ func (client *BlobClient) getPropertiesHandleResponse(resp *http.Response) (Blob
 		}
 		result.ContentMD5 = contentMD5
 	}
-	if val := resp.Header.Get("Content-Encoding"); val != "" {
-		result.ContentEncoding = &val
-	}
-	if val := resp.Header.Get("Content-Disposition"); val != "" {
-		result.ContentDisposition = &val
-	}
-	if val := resp.Header.Get("Content-Language"); val != "" {
-		result.ContentLanguage = &val
-	}
-	if val := resp.Header.Get("Cache-Control"); val != "" {
-		result.CacheControl = &val
+	if val := resp.Header.Get("Content-Type"); val != "" {
+		result.ContentType = &val
 	}
-	if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" {
-		blobSequenceNumber, err := strconv.ParseInt(val, 10, 64)
+	if val := resp.Header.Get("x-ms-copy-completion-time"); val != "" {
+		copyCompletionTime, err := time.Parse(time.RFC1123, val)
 		if err != nil {
 			return BlobClientGetPropertiesResponse{}, err
 		}
-		result.BlobSequenceNumber = &blobSequenceNumber
+		result.CopyCompletionTime = &copyCompletionTime
 	}
-	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
-		result.ClientRequestID = &val
+	if val := resp.Header.Get("x-ms-copy-id"); val != "" {
+		result.CopyID = &val
 	}
-	if val := resp.Header.Get("x-ms-request-id"); val != "" {
-		result.RequestID = &val
+	if val := resp.Header.Get("x-ms-copy-progress"); val != "" {
+		result.CopyProgress = &val
 	}
-	if val := resp.Header.Get("x-ms-version"); val != "" {
-		result.Version = &val
+	if val := resp.Header.Get("x-ms-copy-source"); val != "" {
+		result.CopySource = &val
+	}
+	if val := resp.Header.Get("x-ms-copy-status"); val != "" {
+		result.CopyStatus = (*CopyStatusType)(&val)
+	}
+	if val := resp.Header.Get("x-ms-copy-status-description"); val != "" {
+		result.CopyStatusDescription = &val
+	}
+	if val := resp.Header.Get("x-ms-creation-time"); val != "" {
+		creationTime, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return BlobClientGetPropertiesResponse{}, err
+		}
+		result.CreationTime = &creationTime
 	}
 	if val := resp.Header.Get("Date"); val != "" {
 		date, err := time.Parse(time.RFC1123, val)
@@ -1417,23 +1449,11 @@ 
func (client *BlobClient) getPropertiesHandleResponse(resp *http.Response) (Blob } result.Date = &date } - if val := resp.Header.Get("Accept-Ranges"); val != "" { - result.AcceptRanges = &val - } - if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { - blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) - blobCommittedBlockCount := int32(blobCommittedBlockCount32) - if err != nil { - return BlobClientGetPropertiesResponse{}, err - } - result.BlobCommittedBlockCount = &blobCommittedBlockCount + if val := resp.Header.Get("x-ms-copy-destination-snapshot"); val != "" { + result.DestinationSnapshot = &val } - if val := resp.Header.Get("x-ms-server-encrypted"); val != "" { - isServerEncrypted, err := strconv.ParseBool(val) - if err != nil { - return BlobClientGetPropertiesResponse{}, err - } - result.IsServerEncrypted = &isServerEncrypted + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { result.EncryptionKeySHA256 = &val @@ -1441,28 +1461,22 @@ func (client *BlobClient) getPropertiesHandleResponse(resp *http.Response) (Blob if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { result.EncryptionScope = &val } - if val := resp.Header.Get("x-ms-access-tier"); val != "" { - result.AccessTier = &val - } - if val := resp.Header.Get("x-ms-access-tier-inferred"); val != "" { - accessTierInferred, err := strconv.ParseBool(val) + if val := resp.Header.Get("x-ms-expiry-time"); val != "" { + expiresOn, err := time.Parse(time.RFC1123, val) if err != nil { return BlobClientGetPropertiesResponse{}, err } - result.AccessTierInferred = &accessTierInferred - } - if val := resp.Header.Get("x-ms-archive-status"); val != "" { - result.ArchiveStatus = &val + result.ExpiresOn = &expiresOn } - if val := resp.Header.Get("x-ms-access-tier-change-time"); val != "" { - accessTierChangeTime, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("x-ms-immutability-policy-until-date"); val != "" { + immutabilityPolicyExpiresOn, err := time.Parse(time.RFC1123, val) if err != nil { return BlobClientGetPropertiesResponse{}, err } - result.AccessTierChangeTime = &accessTierChangeTime + result.ImmutabilityPolicyExpiresOn = &immutabilityPolicyExpiresOn } - if val := resp.Header.Get("x-ms-version-id"); val != "" { - result.VersionID = &val + if val := resp.Header.Get("x-ms-immutability-policy-mode"); val != "" { + result.ImmutabilityPolicyMode = (*ImmutabilityPolicyMode)(&val) } if val := resp.Header.Get("x-ms-is-current-version"); val != "" { isCurrentVersion, err := strconv.ParseBool(val) @@ -1471,19 +1485,12 @@ func (client *BlobClient) getPropertiesHandleResponse(resp *http.Response) (Blob } result.IsCurrentVersion = &isCurrentVersion } - if val := resp.Header.Get("x-ms-tag-count"); val != "" { - tagCount, err := strconv.ParseInt(val, 10, 64) - if err != nil { - return BlobClientGetPropertiesResponse{}, err - } - result.TagCount = &tagCount - } - if val := resp.Header.Get("x-ms-expiry-time"); val != "" { - expiresOn, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("x-ms-incremental-copy"); val != "" { + isIncrementalCopy, err := strconv.ParseBool(val) if err != nil { return BlobClientGetPropertiesResponse{}, err } - result.ExpiresOn = &expiresOn + result.IsIncrementalCopy = &isIncrementalCopy } if val := resp.Header.Get("x-ms-blob-sealed"); val != "" { isSealed, err := strconv.ParseBool(val) @@ -1492,8 +1499,12 @@ func (client *BlobClient) 
getPropertiesHandleResponse(resp *http.Response) (Blob } result.IsSealed = &isSealed } - if val := resp.Header.Get("x-ms-rehydrate-priority"); val != "" { - result.RehydratePriority = &val + if val := resp.Header.Get("x-ms-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted } if val := resp.Header.Get("x-ms-last-access-time"); val != "" { lastAccessed, err := time.Parse(time.RFC1123, val) @@ -1502,15 +1513,21 @@ func (client *BlobClient) getPropertiesHandleResponse(resp *http.Response) (Blob } result.LastAccessed = &lastAccessed } - if val := resp.Header.Get("x-ms-immutability-policy-until-date"); val != "" { - immutabilityPolicyExpiresOn, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) if err != nil { return BlobClientGetPropertiesResponse{}, err } - result.ImmutabilityPolicyExpiresOn = &immutabilityPolicyExpiresOn + result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-immutability-policy-mode"); val != "" { - result.ImmutabilityPolicyMode = (*ImmutabilityPolicyMode)(&val) + if val := resp.Header.Get("x-ms-lease-duration"); val != "" { + result.LeaseDuration = (*LeaseDurationType)(&val) + } + if val := resp.Header.Get("x-ms-lease-state"); val != "" { + result.LeaseState = (*LeaseStateType)(&val) + } + if val := resp.Header.Get("x-ms-lease-status"); val != "" { + result.LeaseStatus = (*LeaseStatusType)(&val) } if val := resp.Header.Get("x-ms-legal-hold"); val != "" { legalHold, err := strconv.ParseBool(val) @@ -1519,28 +1536,70 @@ func (client *BlobClient) getPropertiesHandleResponse(resp *http.Response) (Blob } result.LegalHold = &legalHold } + for hh := range resp.Header { + if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { + if result.Metadata == nil { + result.Metadata = map[string]*string{} + } + result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh)) + } + } + if val := resp.Header.Get("x-ms-or-policy-id"); val != "" { + result.ObjectReplicationPolicyID = &val + } + for hh := range resp.Header { + if len(hh) > len("x-ms-or-") && strings.EqualFold(hh[:len("x-ms-or-")], "x-ms-or-") { + if result.Metadata == nil { + result.Metadata = map[string]*string{} + } + result.Metadata[hh[len("x-ms-or-"):]] = to.Ptr(resp.Header.Get(hh)) + } + } + if val := resp.Header.Get("x-ms-rehydrate-priority"); val != "" { + result.RehydratePriority = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-tag-count"); val != "" { + tagCount, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.TagCount = &tagCount + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } return result, nil } // GetTags - The Get Tags operation enables users to get the tags associated with a blob. // If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// options - BlobClientGetTagsOptions contains the optional parameters for the BlobClient.GetTags method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
-// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// +// Generated from API version 2023-11-03 +// - options - BlobClientGetTagsOptions contains the optional parameters for the BlobClient.GetTags method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. func (client *BlobClient) GetTags(ctx context.Context, options *BlobClientGetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientGetTagsResponse, error) { + var err error req, err := client.getTagsCreateRequest(ctx, options, modifiedAccessConditions, leaseAccessConditions) if err != nil { return BlobClientGetTagsResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientGetTagsResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientGetTagsResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientGetTagsResponse{}, err } - return client.getTagsHandleResponse(resp) + resp, err := client.getTagsHandleResponse(httpResp) + return resp, err } // getTagsCreateRequest creates the GetTags request. @@ -1561,7 +1620,7 @@ func (client *BlobClient) getTagsCreateRequest(ctx context.Context, options *Blo reqQP.Set("versionid", *options.VersionID) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -1581,12 +1640,6 @@ func (client *BlobClient) getTagsHandleResponse(resp *http.Response) (BlobClient if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -1594,6 +1647,12 @@ func (client *BlobClient) getTagsHandleResponse(resp *http.Response) (BlobClient } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } if err := runtime.UnmarshalAsXML(resp, &result.BlobTags); err != nil { return BlobClientGetTagsResponse{}, err } @@ -1602,28 +1661,32 @@ func (client *BlobClient) getTagsHandleResponse(resp *http.Response) (BlobClient // Query - The Query operation enables users to select/project on blob data by providing simple query expressions. // If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// options - BlobClientQueryOptions contains the optional parameters for the BlobClient.Query method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. 
-// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -func (client *BlobClient) Query(ctx context.Context, options *BlobClientQueryOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientQueryResponse, error) { +// +// Generated from API version 2023-11-03 +// - options - BlobClientQueryOptions contains the optional parameters for the BlobClient.Query method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlobClient) Query(ctx context.Context, options *BlobClientQueryOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientQueryResponse, error) { + var err error req, err := client.queryCreateRequest(ctx, options, leaseAccessConditions, cpkInfo, modifiedAccessConditions) if err != nil { return BlobClientQueryResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientQueryResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusPartialContent) { - return BlobClientQueryResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK, http.StatusPartialContent) { + err = runtime.NewResponseError(httpResp) + return BlobClientQueryResponse{}, err } - return client.queryHandleResponse(resp) + resp, err := client.queryHandleResponse(httpResp) + return resp, err } // queryCreateRequest creates the Query request. 
-func (client *BlobClient) queryCreateRequest(ctx context.Context, options *BlobClientQueryOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *BlobClient) queryCreateRequest(ctx context.Context, options *BlobClientQueryOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPost, client.endpoint) if err != nil { return nil, err @@ -1651,10 +1714,10 @@ func (client *BlobClient) queryCreateRequest(ctx context.Context, options *BlobC req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -1665,13 +1728,16 @@ func (client *BlobClient) queryCreateRequest(ctx context.Context, options *BlobC if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } req.Raw().Header["Accept"] = []string{"application/xml"} if options != nil && options.QueryRequest != nil { - return req, runtime.MarshalAsXML(req, *options.QueryRequest) + if err := runtime.MarshalAsXML(req, *options.QueryRequest); err != nil { + return nil, err + } + return req, nil } return req, nil } @@ -1679,65 +1745,75 @@ func (client *BlobClient) queryCreateRequest(ctx context.Context, options *BlobC // queryHandleResponse handles the Query response. 
func (client *BlobClient) queryHandleResponse(resp *http.Response) (BlobClientQueryResponse, error) { result := BlobClientQueryResponse{Body: resp.Body} - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("Accept-Ranges"); val != "" { + result.AcceptRanges = &val + } + if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { + blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) + blobCommittedBlockCount := int32(blobCommittedBlockCount32) if err != nil { return BlobClientQueryResponse{}, err } - result.LastModified = &lastModified + result.BlobCommittedBlockCount = &blobCommittedBlockCount } - for hh := range resp.Header { - if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { - if result.Metadata == nil { - result.Metadata = map[string]string{} - } - result.Metadata[hh[len("x-ms-meta-"):]] = resp.Header.Get(hh) + if val := resp.Header.Get("x-ms-blob-content-md5"); val != "" { + blobContentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlobClientQueryResponse{}, err } + result.BlobContentMD5 = blobContentMD5 } - if val := resp.Header.Get("Content-Length"); val != "" { - contentLength, err := strconv.ParseInt(val, 10, 64) + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) if err != nil { return BlobClientQueryResponse{}, err } - result.ContentLength = &contentLength + result.BlobSequenceNumber = &blobSequenceNumber } - if val := resp.Header.Get("Content-Type"); val != "" { - result.ContentType = &val + if val := resp.Header.Get("x-ms-blob-type"); val != "" { + result.BlobType = (*BlobType)(&val) } - if val := resp.Header.Get("Content-Range"); val != "" { - result.ContentRange = &val + if val := resp.Header.Get("Cache-Control"); val != "" { + result.CacheControl = &val } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) if err != nil { return BlobClientQueryResponse{}, err } - result.ContentMD5 = contentMD5 - } - if val := resp.Header.Get("Content-Encoding"); val != "" { - result.ContentEncoding = &val - } - if val := resp.Header.Get("Cache-Control"); val != "" { - result.CacheControl = &val + result.ContentCRC64 = contentCRC64 } if val := resp.Header.Get("Content-Disposition"); val != "" { result.ContentDisposition = &val } + if val := resp.Header.Get("Content-Encoding"); val != "" { + result.ContentEncoding = &val + } if val := resp.Header.Get("Content-Language"); val != "" { result.ContentLanguage = &val } - if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { - blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if val := resp.Header.Get("Content-Length"); val != "" { + contentLength, err := strconv.ParseInt(val, 10, 64) if err != nil { return BlobClientQueryResponse{}, err } - result.BlobSequenceNumber = &blobSequenceNumber + result.ContentLength = &contentLength } - if val := resp.Header.Get("x-ms-blob-type"); val != "" { - result.BlobType = (*BlobType)(&val) + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := 
base64.StdEncoding.DecodeString(val) + if err != nil { + return BlobClientQueryResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("Content-Range"); val != "" { + result.ContentRange = &val + } + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val } if val := resp.Header.Get("x-ms-copy-completion-time"); val != "" { copyCompletionTime, err := time.Parse(time.RFC1123, val) @@ -1746,9 +1822,6 @@ func (client *BlobClient) queryHandleResponse(resp *http.Response) (BlobClientQu } result.CopyCompletionTime = ©CompletionTime } - if val := resp.Header.Get("x-ms-copy-status-description"); val != "" { - result.CopyStatusDescription = &val - } if val := resp.Header.Get("x-ms-copy-id"); val != "" { result.CopyID = &val } @@ -1761,26 +1834,8 @@ func (client *BlobClient) queryHandleResponse(resp *http.Response) (BlobClientQu if val := resp.Header.Get("x-ms-copy-status"); val != "" { result.CopyStatus = (*CopyStatusType)(&val) } - if val := resp.Header.Get("x-ms-lease-duration"); val != "" { - result.LeaseDuration = (*LeaseDurationType)(&val) - } - if val := resp.Header.Get("x-ms-lease-state"); val != "" { - result.LeaseState = (*LeaseStateType)(&val) - } - if val := resp.Header.Get("x-ms-lease-status"); val != "" { - result.LeaseStatus = (*LeaseStatusType)(&val) - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("Accept-Ranges"); val != "" { - result.AcceptRanges = &val + if val := resp.Header.Get("x-ms-copy-status-description"); val != "" { + result.CopyStatusDescription = &val } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) @@ -1789,13 +1844,14 @@ func (client *BlobClient) queryHandleResponse(resp *http.Response) (BlobClientQu } result.Date = &date } - if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { - blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) - blobCommittedBlockCount := int32(blobCommittedBlockCount32) - if err != nil { - return BlobClientQueryResponse{}, err - } - result.BlobCommittedBlockCount = &blobCommittedBlockCount + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val } if val := resp.Header.Get("x-ms-server-encrypted"); val != "" { isServerEncrypted, err := strconv.ParseBool(val) @@ -1804,48 +1860,62 @@ func (client *BlobClient) queryHandleResponse(resp *http.Response) (BlobClientQu } result.IsServerEncrypted = &isServerEncrypted } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientQueryResponse{}, err + } + result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val + if val := resp.Header.Get("x-ms-lease-duration"); val != "" { + result.LeaseDuration = (*LeaseDurationType)(&val) + } + if val := resp.Header.Get("x-ms-lease-state"); val != "" { + result.LeaseState = 
(*LeaseStateType)(&val) + } + if val := resp.Header.Get("x-ms-lease-status"); val != "" { + result.LeaseStatus = (*LeaseStatusType)(&val) } - if val := resp.Header.Get("x-ms-blob-content-md5"); val != "" { - blobContentMD5, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return BlobClientQueryResponse{}, err + for hh := range resp.Header { + if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { + if result.Metadata == nil { + result.Metadata = map[string]*string{} + } + result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh)) } - result.BlobContentMD5 = blobContentMD5 } - if val := resp.Header.Get("x-ms-content-crc64"); val != "" { - contentCRC64, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return BlobClientQueryResponse{}, err - } - result.ContentCRC64 = contentCRC64 + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val } return result, nil } // ReleaseLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations // If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// leaseID - Specifies the current lease ID on the resource. -// options - BlobClientReleaseLeaseOptions contains the optional parameters for the BlobClient.ReleaseLease method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// +// Generated from API version 2023-11-03 +// - leaseID - Specifies the current lease ID on the resource. +// - options - BlobClientReleaseLeaseOptions contains the optional parameters for the BlobClient.ReleaseLease method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *BlobClient) ReleaseLease(ctx context.Context, leaseID string, options *BlobClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientReleaseLeaseResponse, error) { + var err error req, err := client.releaseLeaseCreateRequest(ctx, leaseID, options, modifiedAccessConditions) if err != nil { return BlobClientReleaseLeaseResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientReleaseLeaseResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientReleaseLeaseResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientReleaseLeaseResponse{}, err } - return client.releaseLeaseHandleResponse(resp) + resp, err := client.releaseLeaseHandleResponse(httpResp) + return resp, err } // releaseLeaseCreateRequest creates the ReleaseLease request. 
@@ -1863,10 +1933,10 @@ func (client *BlobClient) releaseLeaseCreateRequest(ctx context.Context, leaseID req.Raw().Header["x-ms-lease-action"] = []string{"release"} req.Raw().Header["x-ms-lease-id"] = []string{leaseID} if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -1877,7 +1947,7 @@ func (client *BlobClient) releaseLeaseCreateRequest(ctx context.Context, leaseID if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -1888,6 +1958,16 @@ func (client *BlobClient) releaseLeaseCreateRequest(ctx context.Context, leaseID // releaseLeaseHandleResponse handles the ReleaseLease response. func (client *BlobClient) releaseLeaseHandleResponse(resp *http.Response) (BlobClientReleaseLeaseResponse, error) { result := BlobClientReleaseLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientReleaseLeaseResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -1898,44 +1978,38 @@ func (client *BlobClient) releaseLeaseHandleResponse(resp *http.Response) (BlobC } result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientReleaseLeaseResponse{}, err - } - result.Date = &date - } return result, nil } // RenewLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations // If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// leaseID - Specifies the current lease ID on the resource. -// options - BlobClientRenewLeaseOptions contains the optional parameters for the BlobClient.RenewLease method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// +// Generated from API version 2023-11-03 +// - leaseID - Specifies the current lease ID on the resource. 
+// - options - BlobClientRenewLeaseOptions contains the optional parameters for the BlobClient.RenewLease method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *BlobClient) RenewLease(ctx context.Context, leaseID string, options *BlobClientRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientRenewLeaseResponse, error) { + var err error req, err := client.renewLeaseCreateRequest(ctx, leaseID, options, modifiedAccessConditions) if err != nil { return BlobClientRenewLeaseResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientRenewLeaseResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientRenewLeaseResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientRenewLeaseResponse{}, err } - return client.renewLeaseHandleResponse(resp) + resp, err := client.renewLeaseHandleResponse(httpResp) + return resp, err } // renewLeaseCreateRequest creates the RenewLease request. @@ -1953,10 +2027,10 @@ func (client *BlobClient) renewLeaseCreateRequest(ctx context.Context, leaseID s req.Raw().Header["x-ms-lease-action"] = []string{"renew"} req.Raw().Header["x-ms-lease-id"] = []string{leaseID} if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -1967,7 +2041,7 @@ func (client *BlobClient) renewLeaseCreateRequest(ctx context.Context, leaseID s if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -1978,6 +2052,16 @@ func (client *BlobClient) renewLeaseCreateRequest(ctx context.Context, leaseID s // renewLeaseHandleResponse handles the RenewLease response. 
func (client *BlobClient) renewLeaseHandleResponse(resp *http.Response) (BlobClientRenewLeaseResponse, error) { result := BlobClientRenewLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientRenewLeaseResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -1991,43 +2075,37 @@ func (client *BlobClient) renewLeaseHandleResponse(resp *http.Response) (BlobCli if val := resp.Header.Get("x-ms-lease-id"); val != "" { result.LeaseID = &val } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientRenewLeaseResponse{}, err - } - result.Date = &date - } return result, nil } // SetExpiry - Sets the time a blob will expire and be deleted. // If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// expiryOptions - Required. Indicates mode of the expiry time -// options - BlobClientSetExpiryOptions contains the optional parameters for the BlobClient.SetExpiry method. +// +// Generated from API version 2023-11-03 +// - expiryOptions - Required. Indicates mode of the expiry time +// - options - BlobClientSetExpiryOptions contains the optional parameters for the BlobClient.SetExpiry method. func (client *BlobClient) SetExpiry(ctx context.Context, expiryOptions ExpiryOptions, options *BlobClientSetExpiryOptions) (BlobClientSetExpiryResponse, error) { + var err error req, err := client.setExpiryCreateRequest(ctx, expiryOptions, options) if err != nil { return BlobClientSetExpiryResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientSetExpiryResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientSetExpiryResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientSetExpiryResponse{}, err } - return client.setExpiryHandleResponse(resp) + resp, err := client.setExpiryHandleResponse(httpResp) + return resp, err } // setExpiryCreateRequest creates the SetExpiry request. @@ -2042,7 +2120,7 @@ func (client *BlobClient) setExpiryCreateRequest(ctx context.Context, expiryOpti reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -2057,6 +2135,16 @@ func (client *BlobClient) setExpiryCreateRequest(ctx context.Context, expiryOpti // setExpiryHandleResponse handles the SetExpiry response. 
func (client *BlobClient) setExpiryHandleResponse(resp *http.Response) (BlobClientSetExpiryResponse, error) { result := BlobClientSetExpiryResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientSetExpiryResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -2067,45 +2155,39 @@ func (client *BlobClient) setExpiryHandleResponse(resp *http.Response) (BlobClie } result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientSetExpiryResponse{}, err - } - result.Date = &date - } return result, nil } // SetHTTPHeaders - The Set HTTP Headers operation sets system properties on the blob // If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// options - BlobClientSetHTTPHeadersOptions contains the optional parameters for the BlobClient.SetHTTPHeaders method. -// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// +// Generated from API version 2023-11-03 +// - options - BlobClientSetHTTPHeadersOptions contains the optional parameters for the BlobClient.SetHTTPHeaders method. +// - BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *BlobClient) SetHTTPHeaders(ctx context.Context, options *BlobClientSetHTTPHeadersOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientSetHTTPHeadersResponse, error) { + var err error req, err := client.setHTTPHeadersCreateRequest(ctx, options, blobHTTPHeaders, leaseAccessConditions, modifiedAccessConditions) if err != nil { return BlobClientSetHTTPHeadersResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientSetHTTPHeadersResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientSetHTTPHeadersResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientSetHTTPHeadersResponse{}, err } - return client.setHTTPHeadersHandleResponse(resp) + resp, err := client.setHTTPHeadersHandleResponse(httpResp) + return resp, err } // setHTTPHeadersCreateRequest creates the SetHTTPHeaders request. 
@@ -2139,10 +2221,10 @@ func (client *BlobClient) setHTTPHeadersCreateRequest(ctx context.Context, optio req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -2156,7 +2238,7 @@ func (client *BlobClient) setHTTPHeadersCreateRequest(ctx context.Context, optio if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -2167,16 +2249,6 @@ func (client *BlobClient) setHTTPHeadersCreateRequest(ctx context.Context, optio // setHTTPHeadersHandleResponse handles the SetHTTPHeaders response. func (client *BlobClient) setHTTPHeadersHandleResponse(resp *http.Response) (BlobClientSetHTTPHeadersResponse, error) { result := BlobClientSetHTTPHeadersResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientSetHTTPHeadersResponse{}, err - } - result.LastModified = &lastModified - } if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) if err != nil { @@ -2187,12 +2259,6 @@ func (client *BlobClient) setHTTPHeadersHandleResponse(resp *http.Response) (Blo if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -2200,28 +2266,48 @@ func (client *BlobClient) setHTTPHeadersHandleResponse(resp *http.Response) (Blo } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientSetHTTPHeadersResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } // SetImmutabilityPolicy - The Set Immutability Policy operation sets the immutability policy 
on the blob // If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// options - BlobClientSetImmutabilityPolicyOptions contains the optional parameters for the BlobClient.SetImmutabilityPolicy -// method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// +// Generated from API version 2023-11-03 +// - options - BlobClientSetImmutabilityPolicyOptions contains the optional parameters for the BlobClient.SetImmutabilityPolicy +// method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *BlobClient) SetImmutabilityPolicy(ctx context.Context, options *BlobClientSetImmutabilityPolicyOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientSetImmutabilityPolicyResponse, error) { + var err error req, err := client.setImmutabilityPolicyCreateRequest(ctx, options, modifiedAccessConditions) if err != nil { return BlobClientSetImmutabilityPolicyResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientSetImmutabilityPolicyResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientSetImmutabilityPolicyResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientSetImmutabilityPolicyResponse{}, err } - return client.setImmutabilityPolicyHandleResponse(resp) + resp, err := client.setImmutabilityPolicyHandleResponse(httpResp) + return resp, err } // setImmutabilityPolicyCreateRequest creates the SetImmutabilityPolicy request. 
@@ -2236,15 +2322,15 @@ func (client *BlobClient) setImmutabilityPolicyCreateRequest(ctx context.Context reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if options != nil && options.ImmutabilityPolicyExpiry != nil { - req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{options.ImmutabilityPolicyExpiry.Format(time.RFC1123)} + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} } if options != nil && options.ImmutabilityPolicyMode != nil { req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} @@ -2259,12 +2345,6 @@ func (client *BlobClient) setImmutabilityPolicyHandleResponse(resp *http.Respons if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -2282,27 +2362,37 @@ func (client *BlobClient) setImmutabilityPolicyHandleResponse(resp *http.Respons if val := resp.Header.Get("x-ms-immutability-policy-mode"); val != "" { result.ImmutabilityPolicyMode = (*ImmutabilityPolicyMode)(&val) } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } // SetLegalHold - The Set Legal Hold operation sets a legal hold on the blob. // If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// legalHold - Specified if a legal hold should be set on the blob. -// options - BlobClientSetLegalHoldOptions contains the optional parameters for the BlobClient.SetLegalHold method. +// +// Generated from API version 2023-11-03 +// - legalHold - Specified if a legal hold should be set on the blob. +// - options - BlobClientSetLegalHoldOptions contains the optional parameters for the BlobClient.SetLegalHold method. 
func (client *BlobClient) SetLegalHold(ctx context.Context, legalHold bool, options *BlobClientSetLegalHoldOptions) (BlobClientSetLegalHoldResponse, error) { + var err error req, err := client.setLegalHoldCreateRequest(ctx, legalHold, options) if err != nil { return BlobClientSetLegalHoldResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientSetLegalHoldResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientSetLegalHoldResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientSetLegalHoldResponse{}, err } - return client.setLegalHoldHandleResponse(resp) + resp, err := client.setLegalHoldHandleResponse(httpResp) + return resp, err } // setLegalHoldCreateRequest creates the SetLegalHold request. @@ -2317,7 +2407,7 @@ func (client *BlobClient) setLegalHoldCreateRequest(ctx context.Context, legalHo reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -2332,12 +2422,6 @@ func (client *BlobClient) setLegalHoldHandleResponse(resp *http.Response) (BlobC if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -2352,35 +2436,45 @@ func (client *BlobClient) setLegalHoldHandleResponse(resp *http.Response) (BlobC } result.LegalHold = &legalHold } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } // SetMetadata - The Set Blob Metadata operation sets user-defined metadata for the specified blob as one or more name-value // pairs // If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// options - BlobClientSetMetadataOptions contains the optional parameters for the BlobClient.SetMetadata method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. -// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -func (client *BlobClient) SetMetadata(ctx context.Context, options *BlobClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientSetMetadataResponse, error) { +// +// Generated from API version 2023-11-03 +// - options - BlobClientSetMetadataOptions contains the optional parameters for the BlobClient.SetMetadata method. 
+// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlobClient) SetMetadata(ctx context.Context, options *BlobClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientSetMetadataResponse, error) { + var err error req, err := client.setMetadataCreateRequest(ctx, options, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) if err != nil { return BlobClientSetMetadataResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientSetMetadataResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientSetMetadataResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientSetMetadataResponse{}, err } - return client.setMetadataHandleResponse(resp) + resp, err := client.setMetadataHandleResponse(httpResp) + return resp, err } // setMetadataCreateRequest creates the SetMetadata request. -func (client *BlobClient) setMetadataCreateRequest(ctx context.Context, options *BlobClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *BlobClient) setMetadataCreateRequest(ctx context.Context, options *BlobClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -2393,7 +2487,9 @@ func (client *BlobClient) setMetadataCreateRequest(ctx context.Context, options req.Raw().URL.RawQuery = reqQP.Encode() if options != nil && options.Metadata != nil { for k, v := range options.Metadata { - req.Raw().Header["x-ms-meta-"+k] = []string{v} + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } } } if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { @@ -2412,10 +2508,10 @@ func (client *BlobClient) setMetadataCreateRequest(ctx context.Context, options req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && 
modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -2426,7 +2522,7 @@ func (client *BlobClient) setMetadataCreateRequest(ctx context.Context, options if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -2437,28 +2533,9 @@ func (client *BlobClient) setMetadataCreateRequest(ctx context.Context, options // setMetadataHandleResponse handles the SetMetadata response. func (client *BlobClient) setMetadataHandleResponse(resp *http.Response) (BlobClientSetMetadataResponse, error) { result := BlobClientSetMetadataResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientSetMetadataResponse{}, err - } - result.LastModified = &lastModified - } if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("x-ms-version-id"); val != "" { - result.VersionID = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -2466,6 +2543,15 @@ func (client *BlobClient) setMetadataHandleResponse(resp *http.Response) (BlobCl } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { isServerEncrypted, err := strconv.ParseBool(val) if err != nil { @@ -2473,35 +2559,49 @@ func (client *BlobClient) setMetadataHandleResponse(resp *http.Response) (BlobCl } result.IsServerEncrypted = &isServerEncrypted } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientSetMetadataResponse{}, err + } + result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val } return result, nil } // SetTags - The Set Tags operation enables users to set tags on a blob. // If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// tags - Blob tags -// options - BlobClientSetTagsOptions contains the optional parameters for the BlobClient.SetTags method. 
-// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// +// Generated from API version 2023-11-03 +// - tags - Blob tags +// - options - BlobClientSetTagsOptions contains the optional parameters for the BlobClient.SetTags method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. func (client *BlobClient) SetTags(ctx context.Context, tags BlobTags, options *BlobClientSetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientSetTagsResponse, error) { + var err error req, err := client.setTagsCreateRequest(ctx, tags, options, modifiedAccessConditions, leaseAccessConditions) if err != nil { return BlobClientSetTagsResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientSetTagsResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusNoContent) { - return BlobClientSetTagsResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusNoContent) { + err = runtime.NewResponseError(httpResp) + return BlobClientSetTagsResponse{}, err } - return client.setTagsHandleResponse(resp) + resp, err := client.setTagsHandleResponse(httpResp) + return resp, err } // setTagsCreateRequest creates the SetTags request. @@ -2519,7 +2619,7 @@ func (client *BlobClient) setTagsCreateRequest(ctx context.Context, tags BlobTag reqQP.Set("versionid", *options.VersionID) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.TransactionalContentMD5 != nil { req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} } @@ -2536,7 +2636,10 @@ func (client *BlobClient) setTagsCreateRequest(ctx context.Context, tags BlobTag req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } req.Raw().Header["Accept"] = []string{"application/xml"} - return req, runtime.MarshalAsXML(req, tags) + if err := runtime.MarshalAsXML(req, tags); err != nil { + return nil, err + } + return req, nil } // setTagsHandleResponse handles the SetTags response. 
@@ -2545,12 +2648,6 @@ func (client *BlobClient) setTagsHandleResponse(resp *http.Response) (BlobClient if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -2558,6 +2655,12 @@ func (client *BlobClient) setTagsHandleResponse(resp *http.Response) (BlobClient } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } @@ -2566,24 +2669,28 @@ func (client *BlobClient) setTagsHandleResponse(resp *http.Response) (BlobClient // premium page blob's tier determines the allowed size, IOPS, and bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive // storage type. This operation does not update the blob's ETag. // If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// tier - Indicates the tier to be set on the blob. -// options - BlobClientSetTierOptions contains the optional parameters for the BlobClient.SetTier method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// +// Generated from API version 2023-11-03 +// - tier - Indicates the tier to be set on the blob. +// - options - BlobClientSetTierOptions contains the optional parameters for the BlobClient.SetTier method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *BlobClient) SetTier(ctx context.Context, tier AccessTier, options *BlobClientSetTierOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientSetTierResponse, error) { + var err error req, err := client.setTierCreateRequest(ctx, tier, options, leaseAccessConditions, modifiedAccessConditions) if err != nil { return BlobClientSetTierResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientSetTierResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { - return BlobClientSetTierResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return BlobClientSetTierResponse{}, err } - return client.setTierHandleResponse(resp) + resp, err := client.setTierHandleResponse(httpResp) + return resp, err } // setTierCreateRequest creates the SetTier request. 
@@ -2608,7 +2715,7 @@ func (client *BlobClient) setTierCreateRequest(ctx context.Context, tier AccessT if options != nil && options.RehydratePriority != nil { req.Raw().Header["x-ms-rehydrate-priority"] = []string{string(*options.RehydratePriority)} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -2639,28 +2746,32 @@ func (client *BlobClient) setTierHandleResponse(resp *http.Response) (BlobClient // StartCopyFromURL - The Start Copy From URL operation copies a blob or an internet resource to a new blob. // If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies -// a page blob snapshot. The value should be URL-encoded as it would appear in a request -// URI. The source blob must either be public or must be authenticated via a shared access signature. -// options - BlobClientStartCopyFromURLOptions contains the optional parameters for the BlobClient.StartCopyFromURL method. -// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL -// method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// +// Generated from API version 2023-11-03 +// - copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies +// a page blob snapshot. The value should be URL-encoded as it would appear in a request +// URI. The source blob must either be public or must be authenticated via a shared access signature. +// - options - BlobClientStartCopyFromURLOptions contains the optional parameters for the BlobClient.StartCopyFromURL method. +// - SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL +// method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. 
func (client *BlobClient) StartCopyFromURL(ctx context.Context, copySource string, options *BlobClientStartCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientStartCopyFromURLResponse, error) { + var err error req, err := client.startCopyFromURLCreateRequest(ctx, copySource, options, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions) if err != nil { return BlobClientStartCopyFromURLResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientStartCopyFromURLResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return BlobClientStartCopyFromURLResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return BlobClientStartCopyFromURLResponse{}, err } - return client.startCopyFromURLHandleResponse(resp) + resp, err := client.startCopyFromURLHandleResponse(httpResp) + return resp, err } // startCopyFromURLCreateRequest creates the StartCopyFromURL request. @@ -2676,7 +2787,9 @@ func (client *BlobClient) startCopyFromURLCreateRequest(ctx context.Context, cop req.Raw().URL.RawQuery = reqQP.Encode() if options != nil && options.Metadata != nil { for k, v := range options.Metadata { - req.Raw().Header["x-ms-meta-"+k] = []string{v} + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } } } if options != nil && options.Tier != nil { @@ -2686,10 +2799,10 @@ func (client *BlobClient) startCopyFromURLCreateRequest(ctx context.Context, cop req.Raw().Header["x-ms-rehydrate-priority"] = []string{string(*options.RehydratePriority)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { - req.Raw().Header["x-ms-source-if-modified-since"] = []string{sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { - req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} @@ -2701,10 +2814,10 @@ func (client *BlobClient) startCopyFromURLCreateRequest(ctx context.Context, cop req.Raw().Header["x-ms-source-if-tags"] = []string{*sourceModifiedAccessConditions.SourceIfTags} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = 
[]string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -2719,7 +2832,7 @@ func (client *BlobClient) startCopyFromURLCreateRequest(ctx context.Context, cop if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -2730,7 +2843,7 @@ func (client *BlobClient) startCopyFromURLCreateRequest(ctx context.Context, cop req.Raw().Header["x-ms-seal-blob"] = []string{strconv.FormatBool(*options.SealBlob)} } if options != nil && options.ImmutabilityPolicyExpiry != nil { - req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{options.ImmutabilityPolicyExpiry.Format(time.RFC1123)} + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} } if options != nil && options.ImmutabilityPolicyMode != nil { req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} @@ -2745,6 +2858,22 @@ func (client *BlobClient) startCopyFromURLCreateRequest(ctx context.Context, cop // startCopyFromURLHandleResponse handles the StartCopyFromURL response. func (client *BlobClient) startCopyFromURLHandleResponse(resp *http.Response) (BlobClientStartCopyFromURLResponse, error) { result := BlobClientStartCopyFromURLResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-copy-id"); val != "" { + result.CopyID = &val + } + if val := resp.Header.Get("x-ms-copy-status"); val != "" { + result.CopyStatus = (*CopyStatusType)(&val) + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientStartCopyFromURLResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -2755,9 +2884,6 @@ func (client *BlobClient) startCopyFromURLHandleResponse(resp *http.Response) (B } result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } @@ -2767,39 +2893,30 @@ func (client *BlobClient) startCopyFromURLHandleResponse(resp *http.Response) (B if val := resp.Header.Get("x-ms-version-id"); val != "" { result.VersionID = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientStartCopyFromURLResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("x-ms-copy-id"); val != "" { - result.CopyID = &val - } - if val := resp.Header.Get("x-ms-copy-status"); val != "" { - result.CopyStatus = (*CopyStatusType)(&val) - } return result, nil } // Undelete - Undelete a blob that was previously soft deleted // If the operation fails it returns an *azcore.ResponseError type. 
-// Generated from API version 2020-10-02 -// options - BlobClientUndeleteOptions contains the optional parameters for the BlobClient.Undelete method. +// +// Generated from API version 2023-11-03 +// - options - BlobClientUndeleteOptions contains the optional parameters for the BlobClient.Undelete method. func (client *BlobClient) Undelete(ctx context.Context, options *BlobClientUndeleteOptions) (BlobClientUndeleteResponse, error) { + var err error req, err := client.undeleteCreateRequest(ctx, options) if err != nil { return BlobClientUndeleteResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientUndeleteResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientUndeleteResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientUndeleteResponse{}, err } - return client.undeleteHandleResponse(resp) + resp, err := client.undeleteHandleResponse(httpResp) + return resp, err } // undeleteCreateRequest creates the Undelete request. @@ -2814,7 +2931,7 @@ func (client *BlobClient) undeleteCreateRequest(ctx context.Context, options *Bl reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -2828,12 +2945,6 @@ func (client *BlobClient) undeleteHandleResponse(resp *http.Response) (BlobClien if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -2841,5 +2952,11 @@ func (client *BlobClient) undeleteHandleResponse(resp *http.Response) (BlobClien } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go index 5f2d1101613d9..b6115b50a6566 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go @@ -3,9 +3,8 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. // Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. package generated @@ -22,21 +21,10 @@ import ( ) // BlockBlobClient contains the methods for the BlockBlob group. -// Don't use this type directly, use NewBlockBlobClient() instead. 
+// Don't use this type directly, use a constructor function instead. type BlockBlobClient struct { + internal *azcore.Client endpoint string - pl runtime.Pipeline -} - -// NewBlockBlobClient creates a new instance of BlockBlobClient with the specified values. -// endpoint - The URL of the service account, container, or blob that is the target of the desired operation. -// pl - the pipeline used for sending requests and handling responses. -func NewBlockBlobClient(endpoint string, pl runtime.Pipeline) *BlockBlobClient { - client := &BlockBlobClient{ - endpoint: endpoint, - pl: pl, - } - return client } // CommitBlockList - The Commit Block List operation writes a blob by specifying the list of block IDs that make up the blob. @@ -47,32 +35,36 @@ func NewBlockBlobClient(endpoint string, pl runtime.Pipeline) *BlockBlobClient { // the most recently uploaded version of the block, whichever list it may // belong to. // If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// blocks - Blob Blocks. -// options - BlockBlobClientCommitBlockListOptions contains the optional parameters for the BlockBlobClient.CommitBlockList -// method. -// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. -// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -func (client *BlockBlobClient) CommitBlockList(ctx context.Context, blocks BlockLookupList, options *BlockBlobClientCommitBlockListOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlockBlobClientCommitBlockListResponse, error) { +// +// Generated from API version 2023-11-03 +// - blocks - Blob Blocks. +// - options - BlockBlobClientCommitBlockListOptions contains the optional parameters for the BlockBlobClient.CommitBlockList +// method. +// - BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
+func (client *BlockBlobClient) CommitBlockList(ctx context.Context, blocks BlockLookupList, options *BlockBlobClientCommitBlockListOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlockBlobClientCommitBlockListResponse, error) { + var err error req, err := client.commitBlockListCreateRequest(ctx, blocks, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) if err != nil { return BlockBlobClientCommitBlockListResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlockBlobClientCommitBlockListResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return BlockBlobClientCommitBlockListResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return BlockBlobClientCommitBlockListResponse{}, err } - return client.commitBlockListHandleResponse(resp) + resp, err := client.commitBlockListHandleResponse(httpResp) + return resp, err } // commitBlockListCreateRequest creates the CommitBlockList request. -func (client *BlockBlobClient) commitBlockListCreateRequest(ctx context.Context, blocks BlockLookupList, options *BlockBlobClientCommitBlockListOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *BlockBlobClient) commitBlockListCreateRequest(ctx context.Context, blocks BlockLookupList, options *BlockBlobClientCommitBlockListOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -106,7 +98,9 @@ func (client *BlockBlobClient) commitBlockListCreateRequest(ctx context.Context, } if options != nil && options.Metadata != nil { for k, v := range options.Metadata { - req.Raw().Header["x-ms-meta-"+k] = []string{v} + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } } } if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { @@ -131,10 +125,10 @@ func (client *BlockBlobClient) commitBlockListCreateRequest(ctx context.Context, req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -145,7 +139,7 @@ func (client *BlockBlobClient) 
commitBlockListCreateRequest(ctx context.Context, if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -153,7 +147,7 @@ func (client *BlockBlobClient) commitBlockListCreateRequest(ctx context.Context, req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} } if options != nil && options.ImmutabilityPolicyExpiry != nil { - req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{options.ImmutabilityPolicyExpiry.Format(time.RFC1123)} + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} } if options != nil && options.ImmutabilityPolicyMode != nil { req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} @@ -162,21 +156,24 @@ func (client *BlockBlobClient) commitBlockListCreateRequest(ctx context.Context, req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} } req.Raw().Header["Accept"] = []string{"application/xml"} - return req, runtime.MarshalAsXML(req, blocks) + if err := runtime.MarshalAsXML(req, blocks); err != nil { + return nil, err + } + return req, nil } // commitBlockListHandleResponse handles the CommitBlockList response. func (client *BlockBlobClient) commitBlockListHandleResponse(resp *http.Response) (BlockBlobClientCommitBlockListResponse, error) { result := BlockBlobClientCommitBlockListResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) if err != nil { return BlockBlobClientCommitBlockListResponse{}, err } - result.LastModified = &lastModified + result.ContentCRC64 = contentCRC64 } if val := resp.Header.Get("Content-MD5"); val != "" { contentMD5, err := base64.StdEncoding.DecodeString(val) @@ -185,68 +182,72 @@ func (client *BlockBlobClient) commitBlockListHandleResponse(resp *http.Response } result.ContentMD5 = contentMD5 } - if val := resp.Header.Get("x-ms-content-crc64"); val != "" { - xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) if err != nil { return BlockBlobClientCommitBlockListResponse{}, err } - result.XMSContentCRC64 = xMSContentCRC64 - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val + result.Date = &date } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val } - if val := resp.Header.Get("x-ms-version-id"); val != "" { - result.VersionID = &val + if val := resp.Header.Get("x-ms-encryption-scope"); val != 
"" { + result.EncryptionScope = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) if err != nil { return BlockBlobClientCommitBlockListResponse{}, err } - result.Date = &date + result.IsServerEncrypted = &isServerEncrypted } - if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { - isServerEncrypted, err := strconv.ParseBool(val) + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) if err != nil { return BlockBlobClientCommitBlockListResponse{}, err } - result.IsServerEncrypted = &isServerEncrypted + result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val } return result, nil } // GetBlockList - The Get Block List operation retrieves the list of blocks that have been uploaded as part of a block blob // If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// listType - Specifies whether to return the list of committed blocks, the list of uncommitted blocks, or both lists together. -// options - BlockBlobClientGetBlockListOptions contains the optional parameters for the BlockBlobClient.GetBlockList method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// +// Generated from API version 2023-11-03 +// - listType - Specifies whether to return the list of committed blocks, the list of uncommitted blocks, or both lists together. +// - options - BlockBlobClientGetBlockListOptions contains the optional parameters for the BlockBlobClient.GetBlockList method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
func (client *BlockBlobClient) GetBlockList(ctx context.Context, listType BlockListType, options *BlockBlobClientGetBlockListOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlockBlobClientGetBlockListResponse, error) { + var err error req, err := client.getBlockListCreateRequest(ctx, listType, options, leaseAccessConditions, modifiedAccessConditions) if err != nil { return BlockBlobClientGetBlockListResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlockBlobClientGetBlockListResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlockBlobClientGetBlockListResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlockBlobClientGetBlockListResponse{}, err } - return client.getBlockListHandleResponse(resp) + resp, err := client.getBlockListHandleResponse(httpResp) + return resp, err } // getBlockListCreateRequest creates the GetBlockList request. @@ -271,7 +272,7 @@ func (client *BlockBlobClient) getBlockListCreateRequest(ctx context.Context, li if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -282,28 +283,35 @@ func (client *BlockBlobClient) getBlockListCreateRequest(ctx context.Context, li // getBlockListHandleResponse handles the GetBlockList response. 
func (client *BlockBlobClient) getBlockListHandleResponse(resp *http.Response) (BlockBlobClientGetBlockListResponse, error) { result := BlockBlobClientGetBlockListResponse{} - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("x-ms-blob-content-length"); val != "" { + blobContentLength, err := strconv.ParseInt(val, 10, 64) if err != nil { return BlockBlobClientGetBlockListResponse{}, err } - result.LastModified = &lastModified + result.BlobContentLength = &blobContentLength } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val } if val := resp.Header.Get("Content-Type"); val != "" { result.ContentType = &val } - if val := resp.Header.Get("x-ms-blob-content-length"); val != "" { - blobContentLength, err := strconv.ParseInt(val, 10, 64) + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) if err != nil { return BlockBlobClientGetBlockListResponse{}, err } - result.BlobContentLength = &blobContentLength + result.Date = &date } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientGetBlockListResponse{}, err + } + result.LastModified = &lastModified } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val @@ -311,13 +319,6 @@ func (client *BlockBlobClient) getBlockListHandleResponse(resp *http.Response) ( if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlockBlobClientGetBlockListResponse{}, err - } - result.Date = &date - } if err := runtime.UnmarshalAsXML(resp, &result.BlockList); err != nil { return BlockBlobClientGetBlockListResponse{}, err } @@ -330,37 +331,41 @@ func (client *BlockBlobClient) getBlockListHandleResponse(resp *http.Response) ( // partial updates to a block blob’s contents using a source URL, use the Put // Block from URL API in conjunction with Put Block List. // If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// contentLength - The length of the request. -// copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies -// a page blob snapshot. The value should be URL-encoded as it would appear in a request -// URI. The source blob must either be public or must be authenticated via a shared access signature. -// options - BlockBlobClientPutBlobFromURLOptions contains the optional parameters for the BlockBlobClient.PutBlobFromURL -// method. -// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. -// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. 
-// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL -// method. -func (client *BlockBlobClient) PutBlobFromURL(ctx context.Context, contentLength int64, copySource string, options *BlockBlobClientPutBlobFromURLOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (BlockBlobClientPutBlobFromURLResponse, error) { +// +// Generated from API version 2023-11-03 +// - contentLength - The length of the request. +// - copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies +// a page blob snapshot. The value should be URL-encoded as it would appear in a request +// URI. The source blob must either be public or must be authenticated via a shared access signature. +// - options - BlockBlobClientPutBlobFromURLOptions contains the optional parameters for the BlockBlobClient.PutBlobFromURL +// method. +// - BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// - SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL +// method. +func (client *BlockBlobClient) PutBlobFromURL(ctx context.Context, contentLength int64, copySource string, options *BlockBlobClientPutBlobFromURLOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (BlockBlobClientPutBlobFromURLResponse, error) { + var err error req, err := client.putBlobFromURLCreateRequest(ctx, contentLength, copySource, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions, sourceModifiedAccessConditions) if err != nil { return BlockBlobClientPutBlobFromURLResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlockBlobClientPutBlobFromURLResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return BlockBlobClientPutBlobFromURLResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return BlockBlobClientPutBlobFromURLResponse{}, err } - return client.putBlobFromURLHandleResponse(resp) + resp, err := client.putBlobFromURLHandleResponse(httpResp) + return resp, err } // putBlobFromURLCreateRequest creates the PutBlobFromURL request. 
-func (client *BlockBlobClient) putBlobFromURLCreateRequest(ctx context.Context, contentLength int64, copySource string, options *BlockBlobClientPutBlobFromURLOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) { +func (client *BlockBlobClient) putBlobFromURLCreateRequest(ctx context.Context, contentLength int64, copySource string, options *BlockBlobClientPutBlobFromURLOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -392,7 +397,9 @@ func (client *BlockBlobClient) putBlobFromURLCreateRequest(ctx context.Context, } if options != nil && options.Metadata != nil { for k, v := range options.Metadata { - req.Raw().Header["x-ms-meta-"+k] = []string{v} + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } } } if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { @@ -417,10 +424,10 @@ func (client *BlockBlobClient) putBlobFromURLCreateRequest(ctx context.Context, req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -432,10 +439,10 @@ func (client *BlockBlobClient) putBlobFromURLCreateRequest(ctx context.Context, req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { - req.Raw().Header["x-ms-source-if-modified-since"] = []string{sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { - req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} @@ 
-446,7 +453,7 @@ func (client *BlockBlobClient) putBlobFromURLCreateRequest(ctx context.Context, if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfTags != nil { req.Raw().Header["x-ms-source-if-tags"] = []string{*sourceModifiedAccessConditions.SourceIfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -463,6 +470,9 @@ func (client *BlockBlobClient) putBlobFromURLCreateRequest(ctx context.Context, if options != nil && options.CopySourceAuthorization != nil { req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} } + if options != nil && options.CopySourceTags != nil { + req.Raw().Header["x-ms-copy-source-tag-option"] = []string{string(*options.CopySourceTags)} + } req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } @@ -470,15 +480,8 @@ func (client *BlockBlobClient) putBlobFromURLCreateRequest(ctx context.Context, // putBlobFromURLHandleResponse handles the PutBlobFromURL response. func (client *BlockBlobClient) putBlobFromURLHandleResponse(resp *http.Response) (BlockBlobClientPutBlobFromURLResponse, error) { result := BlockBlobClientPutBlobFromURLResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlockBlobClientPutBlobFromURLResponse{}, err - } - result.LastModified = &lastModified + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val } if val := resp.Header.Get("Content-MD5"); val != "" { contentMD5, err := base64.StdEncoding.DecodeString(val) @@ -487,18 +490,6 @@ func (client *BlockBlobClient) putBlobFromURLHandleResponse(resp *http.Response) } result.ContentMD5 = contentMD5 } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("x-ms-version-id"); val != "" { - result.VersionID = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -506,6 +497,15 @@ func (client *BlockBlobClient) putBlobFromURLHandleResponse(resp *http.Response) } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { isServerEncrypted, err := strconv.ParseBool(val) if err != nil { @@ -513,44 +513,58 @@ func (client *BlockBlobClient) putBlobFromURLHandleResponse(resp *http.Response) } result.IsServerEncrypted = &isServerEncrypted } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientPutBlobFromURLResponse{}, err + } + result.LastModified = 
&lastModified } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val } return result, nil } // StageBlock - The Stage Block operation creates a new block to be committed as part of a blob // If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// blockID - A valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or equal -// to 64 bytes in size. For a given blob, the length of the value specified for the blockid -// parameter must be the same size for each block. -// contentLength - The length of the request. -// body - Initial data -// options - BlockBlobClientStageBlockOptions contains the optional parameters for the BlockBlobClient.StageBlock method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. -// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. -func (client *BlockBlobClient) StageBlock(ctx context.Context, blockID string, contentLength int64, body io.ReadSeekCloser, options *BlockBlobClientStageBlockOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo) (BlockBlobClientStageBlockResponse, error) { +// +// Generated from API version 2023-11-03 +// - blockID - A valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or equal +// to 64 bytes in size. For a given blob, the length of the value specified for the blockid +// parameter must be the same size for each block. +// - contentLength - The length of the request. +// - body - Initial data +// - options - BlockBlobClientStageBlockOptions contains the optional parameters for the BlockBlobClient.StageBlock method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. 
+func (client *BlockBlobClient) StageBlock(ctx context.Context, blockID string, contentLength int64, body io.ReadSeekCloser, options *BlockBlobClientStageBlockOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo) (BlockBlobClientStageBlockResponse, error) { + var err error req, err := client.stageBlockCreateRequest(ctx, blockID, contentLength, body, options, leaseAccessConditions, cpkInfo, cpkScopeInfo) if err != nil { return BlockBlobClientStageBlockResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlockBlobClientStageBlockResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return BlockBlobClientStageBlockResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return BlockBlobClientStageBlockResponse{}, err } - return client.stageBlockHandleResponse(resp) + resp, err := client.stageBlockHandleResponse(httpResp) + return resp, err } // stageBlockCreateRequest creates the StageBlock request. -func (client *BlockBlobClient) stageBlockCreateRequest(ctx context.Context, blockID string, contentLength int64, body io.ReadSeekCloser, options *BlockBlobClientStageBlockOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo) (*policy.Request, error) { +func (client *BlockBlobClient) stageBlockCreateRequest(ctx context.Context, blockID string, contentLength int64, body io.ReadSeekCloser, options *BlockBlobClientStageBlockOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -584,17 +598,30 @@ func (client *BlockBlobClient) stageBlockCreateRequest(ctx context.Context, bloc if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } req.Raw().Header["Accept"] = []string{"application/xml"} - return req, req.SetBody(body, "application/octet-stream") + if err := req.SetBody(body, "application/octet-stream"); err != nil { + return nil, err + } + return req, nil } // stageBlockHandleResponse handles the StageBlock response. 
func (client *BlockBlobClient) stageBlockHandleResponse(resp *http.Response) (BlockBlobClientStageBlockResponse, error) { result := BlockBlobClientStageBlockResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlockBlobClientStageBlockResponse{}, err + } + result.ContentCRC64 = contentCRC64 + } if val := resp.Header.Get("Content-MD5"); val != "" { contentMD5, err := base64.StdEncoding.DecodeString(val) if err != nil { @@ -602,15 +629,6 @@ func (client *BlockBlobClient) stageBlockHandleResponse(resp *http.Response) (Bl } result.ContentMD5 = contentMD5 } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -618,12 +636,11 @@ func (client *BlockBlobClient) stageBlockHandleResponse(resp *http.Response) (Bl } result.Date = &date } - if val := resp.Header.Get("x-ms-content-crc64"); val != "" { - xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return BlockBlobClientStageBlockResponse{}, err - } - result.XMSContentCRC64 = xMSContentCRC64 + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val } if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { isServerEncrypted, err := strconv.ParseBool(val) @@ -632,11 +649,11 @@ func (client *BlockBlobClient) stageBlockHandleResponse(resp *http.Response) (Bl } result.IsServerEncrypted = &isServerEncrypted } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val } return result, nil } @@ -644,36 +661,40 @@ func (client *BlockBlobClient) stageBlockHandleResponse(resp *http.Response) (Bl // StageBlockFromURL - The Stage Block operation creates a new block to be committed as part of a blob where the contents // are read from a URL. // If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// blockID - A valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or equal -// to 64 bytes in size. For a given blob, the length of the value specified for the blockid -// parameter must be the same size for each block. -// contentLength - The length of the request. -// sourceURL - Specify a URL to the copy source. -// options - BlockBlobClientStageBlockFromURLOptions contains the optional parameters for the BlockBlobClient.StageBlockFromURL -// method. -// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. -// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. 
-// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL -// method. -func (client *BlockBlobClient) StageBlockFromURL(ctx context.Context, blockID string, contentLength int64, sourceURL string, options *BlockBlobClientStageBlockFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (BlockBlobClientStageBlockFromURLResponse, error) { +// +// Generated from API version 2023-11-03 +// - blockID - A valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or equal +// to 64 bytes in size. For a given blob, the length of the value specified for the blockid +// parameter must be the same size for each block. +// - contentLength - The length of the request. +// - sourceURL - Specify a URL to the copy source. +// - options - BlockBlobClientStageBlockFromURLOptions contains the optional parameters for the BlockBlobClient.StageBlockFromURL +// method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL +// method. +func (client *BlockBlobClient) StageBlockFromURL(ctx context.Context, blockID string, contentLength int64, sourceURL string, options *BlockBlobClientStageBlockFromURLOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, leaseAccessConditions *LeaseAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (BlockBlobClientStageBlockFromURLResponse, error) { + var err error req, err := client.stageBlockFromURLCreateRequest(ctx, blockID, contentLength, sourceURL, options, cpkInfo, cpkScopeInfo, leaseAccessConditions, sourceModifiedAccessConditions) if err != nil { return BlockBlobClientStageBlockFromURLResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlockBlobClientStageBlockFromURLResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return BlockBlobClientStageBlockFromURLResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return BlockBlobClientStageBlockFromURLResponse{}, err } - return client.stageBlockFromURLHandleResponse(resp) + resp, err := client.stageBlockFromURLHandleResponse(httpResp) + return resp, err } // stageBlockFromURLCreateRequest creates the StageBlockFromURL request. 
-func (client *BlockBlobClient) stageBlockFromURLCreateRequest(ctx context.Context, blockID string, contentLength int64, sourceURL string, options *BlockBlobClientStageBlockFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) { +func (client *BlockBlobClient) stageBlockFromURLCreateRequest(ctx context.Context, blockID string, contentLength int64, sourceURL string, options *BlockBlobClientStageBlockFromURLOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, leaseAccessConditions *LeaseAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -712,10 +733,10 @@ func (client *BlockBlobClient) stageBlockFromURLCreateRequest(ctx context.Contex req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { - req.Raw().Header["x-ms-source-if-modified-since"] = []string{sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { - req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} @@ -723,7 +744,7 @@ func (client *BlockBlobClient) stageBlockFromURLCreateRequest(ctx context.Contex if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -737,28 +758,22 @@ func (client *BlockBlobClient) stageBlockFromURLCreateRequest(ctx context.Contex // stageBlockFromURLHandleResponse handles the StageBlockFromURL response. 
func (client *BlockBlobClient) stageBlockFromURLHandleResponse(resp *http.Response) (BlockBlobClientStageBlockFromURLResponse, error) { result := BlockBlobClientStageBlockFromURLResponse{} - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) if err != nil { return BlockBlobClientStageBlockFromURLResponse{}, err } - result.ContentMD5 = contentMD5 + result.ContentCRC64 = contentCRC64 } - if val := resp.Header.Get("x-ms-content-crc64"); val != "" { - xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) if err != nil { return BlockBlobClientStageBlockFromURLResponse{}, err } - result.XMSContentCRC64 = xMSContentCRC64 - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val + result.ContentMD5 = contentMD5 } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) @@ -767,6 +782,12 @@ func (client *BlockBlobClient) stageBlockFromURLHandleResponse(resp *http.Respon } result.Date = &date } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { isServerEncrypted, err := strconv.ParseBool(val) if err != nil { @@ -774,11 +795,11 @@ func (client *BlockBlobClient) stageBlockFromURLHandleResponse(resp *http.Respon } result.IsServerEncrypted = &isServerEncrypted } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val } return result, nil } @@ -788,32 +809,36 @@ func (client *BlockBlobClient) stageBlockFromURLHandleResponse(resp *http.Respon // Blob; the content of the existing blob is overwritten with the content of the new blob. To perform a partial update of // the content of a block blob, use the Put Block List operation. // If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// contentLength - The length of the request. -// body - Initial data -// options - BlockBlobClientUploadOptions contains the optional parameters for the BlockBlobClient.Upload method. -// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. -// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. 
-// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -func (client *BlockBlobClient) Upload(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *BlockBlobClientUploadOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlockBlobClientUploadResponse, error) { +// +// Generated from API version 2023-11-03 +// - contentLength - The length of the request. +// - body - Initial data +// - options - BlockBlobClientUploadOptions contains the optional parameters for the BlockBlobClient.Upload method. +// - BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlockBlobClient) Upload(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *BlockBlobClientUploadOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlockBlobClientUploadResponse, error) { + var err error req, err := client.uploadCreateRequest(ctx, contentLength, body, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) if err != nil { return BlockBlobClientUploadResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlockBlobClientUploadResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return BlockBlobClientUploadResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return BlockBlobClientUploadResponse{}, err } - return client.uploadHandleResponse(resp) + resp, err := client.uploadHandleResponse(httpResp) + return resp, err } // uploadCreateRequest creates the Upload request. 
-func (client *BlockBlobClient) uploadCreateRequest(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *BlockBlobClientUploadOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *BlockBlobClient) uploadCreateRequest(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *BlockBlobClientUploadOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -845,7 +870,9 @@ func (client *BlockBlobClient) uploadCreateRequest(ctx context.Context, contentL } if options != nil && options.Metadata != nil { for k, v := range options.Metadata { - req.Raw().Header["x-ms-meta-"+k] = []string{v} + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } } } if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { @@ -870,10 +897,10 @@ func (client *BlockBlobClient) uploadCreateRequest(ctx context.Context, contentL req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -884,7 +911,7 @@ func (client *BlockBlobClient) uploadCreateRequest(ctx context.Context, contentL if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -892,7 +919,7 @@ func (client *BlockBlobClient) uploadCreateRequest(ctx context.Context, contentL req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} } if options != nil && options.ImmutabilityPolicyExpiry != nil { - req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{options.ImmutabilityPolicyExpiry.Format(time.RFC1123)} + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} } if options != nil && options.ImmutabilityPolicyMode != nil { req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} @@ -900,22 +927,21 @@ func (client *BlockBlobClient) uploadCreateRequest(ctx context.Context, contentL if options != nil && options.LegalHold != nil { 
req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} } + if options != nil && options.TransactionalContentCRC64 != nil { + req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} + } req.Raw().Header["Accept"] = []string{"application/xml"} - return req, req.SetBody(body, "application/octet-stream") + if err := req.SetBody(body, "application/octet-stream"); err != nil { + return nil, err + } + return req, nil } // uploadHandleResponse handles the Upload response. func (client *BlockBlobClient) uploadHandleResponse(resp *http.Response) (BlockBlobClientUploadResponse, error) { result := BlockBlobClientUploadResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlockBlobClientUploadResponse{}, err - } - result.LastModified = &lastModified + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val } if val := resp.Header.Get("Content-MD5"); val != "" { contentMD5, err := base64.StdEncoding.DecodeString(val) @@ -924,18 +950,6 @@ func (client *BlockBlobClient) uploadHandleResponse(resp *http.Response) (BlockB } result.ContentMD5 = contentMD5 } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("x-ms-version-id"); val != "" { - result.VersionID = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -943,6 +957,15 @@ func (client *BlockBlobClient) uploadHandleResponse(resp *http.Response) (BlockB } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { isServerEncrypted, err := strconv.ParseBool(val) if err != nil { @@ -950,11 +973,21 @@ func (client *BlockBlobClient) uploadHandleResponse(resp *http.Response) (BlockB } result.IsServerEncrypted = &isServerEncrypted } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientUploadResponse{}, err + } + result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val } return result, nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_constants.go index 74e6cf1e85904..95af9e15447dc 100644 --- 
a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_constants.go @@ -3,9 +3,8 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. // Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. package generated @@ -13,6 +12,7 @@ type AccessTier string const ( AccessTierArchive AccessTier = "Archive" + AccessTierCold AccessTier = "Cold" AccessTierCool AccessTier = "Cool" AccessTierHot AccessTier = "Hot" AccessTierP10 AccessTier = "P10" @@ -33,6 +33,7 @@ const ( func PossibleAccessTierValues() []AccessTier { return []AccessTier{ AccessTierArchive, + AccessTierCold, AccessTierCool, AccessTierHot, AccessTierP10, @@ -53,27 +54,28 @@ func PossibleAccessTierValues() []AccessTier { type AccountKind string const ( - AccountKindStorage AccountKind = "Storage" AccountKindBlobStorage AccountKind = "BlobStorage" - AccountKindStorageV2 AccountKind = "StorageV2" - AccountKindFileStorage AccountKind = "FileStorage" AccountKindBlockBlobStorage AccountKind = "BlockBlobStorage" + AccountKindFileStorage AccountKind = "FileStorage" + AccountKindStorage AccountKind = "Storage" + AccountKindStorageV2 AccountKind = "StorageV2" ) // PossibleAccountKindValues returns the possible values for the AccountKind const type. func PossibleAccountKindValues() []AccountKind { return []AccountKind{ - AccountKindStorage, AccountKindBlobStorage, - AccountKindStorageV2, - AccountKindFileStorage, AccountKindBlockBlobStorage, + AccountKindFileStorage, + AccountKindStorage, + AccountKindStorageV2, } } type ArchiveStatus string const ( + ArchiveStatusRehydratePendingToCold ArchiveStatus = "rehydrate-pending-to-cold" ArchiveStatusRehydratePendingToCool ArchiveStatus = "rehydrate-pending-to-cool" ArchiveStatusRehydratePendingToHot ArchiveStatus = "rehydrate-pending-to-hot" ) @@ -81,25 +83,41 @@ const ( // PossibleArchiveStatusValues returns the possible values for the ArchiveStatus const type. func PossibleArchiveStatusValues() []ArchiveStatus { return []ArchiveStatus{ + ArchiveStatusRehydratePendingToCold, ArchiveStatusRehydratePendingToCool, ArchiveStatusRehydratePendingToHot, } } +type BlobCopySourceTags string + +const ( + BlobCopySourceTagsCOPY BlobCopySourceTags = "COPY" + BlobCopySourceTagsREPLACE BlobCopySourceTags = "REPLACE" +) + +// PossibleBlobCopySourceTagsValues returns the possible values for the BlobCopySourceTags const type. +func PossibleBlobCopySourceTagsValues() []BlobCopySourceTags { + return []BlobCopySourceTags{ + BlobCopySourceTagsCOPY, + BlobCopySourceTagsREPLACE, + } +} + // BlobGeoReplicationStatus - The status of the secondary location type BlobGeoReplicationStatus string const ( - BlobGeoReplicationStatusLive BlobGeoReplicationStatus = "live" BlobGeoReplicationStatusBootstrap BlobGeoReplicationStatus = "bootstrap" + BlobGeoReplicationStatusLive BlobGeoReplicationStatus = "live" BlobGeoReplicationStatusUnavailable BlobGeoReplicationStatus = "unavailable" ) // PossibleBlobGeoReplicationStatusValues returns the possible values for the BlobGeoReplicationStatus const type. 
func PossibleBlobGeoReplicationStatusValues() []BlobGeoReplicationStatus { return []BlobGeoReplicationStatus{ - BlobGeoReplicationStatusLive, BlobGeoReplicationStatusBootstrap, + BlobGeoReplicationStatusLive, BlobGeoReplicationStatusUnavailable, } } @@ -107,53 +125,53 @@ func PossibleBlobGeoReplicationStatusValues() []BlobGeoReplicationStatus { type BlobType string const ( + BlobTypeAppendBlob BlobType = "AppendBlob" BlobTypeBlockBlob BlobType = "BlockBlob" BlobTypePageBlob BlobType = "PageBlob" - BlobTypeAppendBlob BlobType = "AppendBlob" ) // PossibleBlobTypeValues returns the possible values for the BlobType const type. func PossibleBlobTypeValues() []BlobType { return []BlobType{ + BlobTypeAppendBlob, BlobTypeBlockBlob, BlobTypePageBlob, - BlobTypeAppendBlob, } } type BlockListType string const ( + BlockListTypeAll BlockListType = "all" BlockListTypeCommitted BlockListType = "committed" BlockListTypeUncommitted BlockListType = "uncommitted" - BlockListTypeAll BlockListType = "all" ) // PossibleBlockListTypeValues returns the possible values for the BlockListType const type. func PossibleBlockListTypeValues() []BlockListType { return []BlockListType{ + BlockListTypeAll, BlockListTypeCommitted, BlockListTypeUncommitted, - BlockListTypeAll, } } type CopyStatusType string const ( - CopyStatusTypePending CopyStatusType = "pending" - CopyStatusTypeSuccess CopyStatusType = "success" CopyStatusTypeAborted CopyStatusType = "aborted" CopyStatusTypeFailed CopyStatusType = "failed" + CopyStatusTypePending CopyStatusType = "pending" + CopyStatusTypeSuccess CopyStatusType = "success" ) // PossibleCopyStatusTypeValues returns the possible values for the CopyStatusType const type. func PossibleCopyStatusTypeValues() []CopyStatusType { return []CopyStatusType{ - CopyStatusTypePending, - CopyStatusTypeSuccess, CopyStatusTypeAborted, CopyStatusTypeFailed, + CopyStatusTypePending, + CopyStatusTypeSuccess, } } @@ -190,15 +208,15 @@ func PossibleDeleteTypeValues() []DeleteType { type EncryptionAlgorithmType string const ( - EncryptionAlgorithmTypeNone EncryptionAlgorithmType = "None" EncryptionAlgorithmTypeAES256 EncryptionAlgorithmType = "AES256" + EncryptionAlgorithmTypeNone EncryptionAlgorithmType = "None" ) // PossibleEncryptionAlgorithmTypeValues returns the possible values for the EncryptionAlgorithmType const type. func PossibleEncryptionAlgorithmTypeValues() []EncryptionAlgorithmType { return []EncryptionAlgorithmType{ - EncryptionAlgorithmTypeNone, EncryptionAlgorithmTypeAES256, + EncryptionAlgorithmTypeNone, } } @@ -221,50 +239,65 @@ func PossibleExpiryOptionsValues() []ExpiryOptions { } } +type FilterBlobsIncludeItem string + +const ( + FilterBlobsIncludeItemNone FilterBlobsIncludeItem = "none" + FilterBlobsIncludeItemVersions FilterBlobsIncludeItem = "versions" +) + +// PossibleFilterBlobsIncludeItemValues returns the possible values for the FilterBlobsIncludeItem const type. +func PossibleFilterBlobsIncludeItemValues() []FilterBlobsIncludeItem { + return []FilterBlobsIncludeItem{ + FilterBlobsIncludeItemNone, + FilterBlobsIncludeItemVersions, + } +} + type ImmutabilityPolicyMode string const ( + ImmutabilityPolicyModeLocked ImmutabilityPolicyMode = "Locked" ImmutabilityPolicyModeMutable ImmutabilityPolicyMode = "Mutable" ImmutabilityPolicyModeUnlocked ImmutabilityPolicyMode = "Unlocked" - ImmutabilityPolicyModeLocked ImmutabilityPolicyMode = "Locked" ) // PossibleImmutabilityPolicyModeValues returns the possible values for the ImmutabilityPolicyMode const type. 
func PossibleImmutabilityPolicyModeValues() []ImmutabilityPolicyMode { return []ImmutabilityPolicyMode{ + ImmutabilityPolicyModeLocked, ImmutabilityPolicyModeMutable, ImmutabilityPolicyModeUnlocked, - ImmutabilityPolicyModeLocked, } } type ImmutabilityPolicySetting string const ( - ImmutabilityPolicySettingUnlocked ImmutabilityPolicySetting = "Unlocked" ImmutabilityPolicySettingLocked ImmutabilityPolicySetting = "Locked" + ImmutabilityPolicySettingUnlocked ImmutabilityPolicySetting = "Unlocked" ) // PossibleImmutabilityPolicySettingValues returns the possible values for the ImmutabilityPolicySetting const type. func PossibleImmutabilityPolicySettingValues() []ImmutabilityPolicySetting { return []ImmutabilityPolicySetting{ - ImmutabilityPolicySettingUnlocked, ImmutabilityPolicySettingLocked, + ImmutabilityPolicySettingUnlocked, } } type LeaseDurationType string const ( - LeaseDurationTypeInfinite LeaseDurationType = "infinite" LeaseDurationTypeFixed LeaseDurationType = "fixed" + LeaseDurationTypeInfinite LeaseDurationType = "infinite" ) // PossibleLeaseDurationTypeValues returns the possible values for the LeaseDurationType const type. func PossibleLeaseDurationTypeValues() []LeaseDurationType { return []LeaseDurationType{ - LeaseDurationTypeInfinite, LeaseDurationTypeFixed, + LeaseDurationTypeInfinite, } } @@ -272,20 +305,20 @@ type LeaseStateType string const ( LeaseStateTypeAvailable LeaseStateType = "available" - LeaseStateTypeLeased LeaseStateType = "leased" - LeaseStateTypeExpired LeaseStateType = "expired" LeaseStateTypeBreaking LeaseStateType = "breaking" LeaseStateTypeBroken LeaseStateType = "broken" + LeaseStateTypeExpired LeaseStateType = "expired" + LeaseStateTypeLeased LeaseStateType = "leased" ) // PossibleLeaseStateTypeValues returns the possible values for the LeaseStateType const type. func PossibleLeaseStateTypeValues() []LeaseStateType { return []LeaseStateType{ LeaseStateTypeAvailable, - LeaseStateTypeLeased, - LeaseStateTypeExpired, LeaseStateTypeBreaking, LeaseStateTypeBroken, + LeaseStateTypeExpired, + LeaseStateTypeLeased, } } @@ -309,14 +342,14 @@ type ListBlobsIncludeItem string const ( ListBlobsIncludeItemCopy ListBlobsIncludeItem = "copy" ListBlobsIncludeItemDeleted ListBlobsIncludeItem = "deleted" + ListBlobsIncludeItemDeletedwithversions ListBlobsIncludeItem = "deletedwithversions" + ListBlobsIncludeItemImmutabilitypolicy ListBlobsIncludeItem = "immutabilitypolicy" + ListBlobsIncludeItemLegalhold ListBlobsIncludeItem = "legalhold" ListBlobsIncludeItemMetadata ListBlobsIncludeItem = "metadata" ListBlobsIncludeItemSnapshots ListBlobsIncludeItem = "snapshots" + ListBlobsIncludeItemTags ListBlobsIncludeItem = "tags" ListBlobsIncludeItemUncommittedblobs ListBlobsIncludeItem = "uncommittedblobs" ListBlobsIncludeItemVersions ListBlobsIncludeItem = "versions" - ListBlobsIncludeItemTags ListBlobsIncludeItem = "tags" - ListBlobsIncludeItemImmutabilitypolicy ListBlobsIncludeItem = "immutabilitypolicy" - ListBlobsIncludeItemLegalhold ListBlobsIncludeItem = "legalhold" - ListBlobsIncludeItemDeletedwithversions ListBlobsIncludeItem = "deletedwithversions" ) // PossibleListBlobsIncludeItemValues returns the possible values for the ListBlobsIncludeItem const type. 
@@ -324,30 +357,30 @@ func PossibleListBlobsIncludeItemValues() []ListBlobsIncludeItem { return []ListBlobsIncludeItem{ ListBlobsIncludeItemCopy, ListBlobsIncludeItemDeleted, + ListBlobsIncludeItemDeletedwithversions, + ListBlobsIncludeItemImmutabilitypolicy, + ListBlobsIncludeItemLegalhold, ListBlobsIncludeItemMetadata, ListBlobsIncludeItemSnapshots, + ListBlobsIncludeItemTags, ListBlobsIncludeItemUncommittedblobs, ListBlobsIncludeItemVersions, - ListBlobsIncludeItemTags, - ListBlobsIncludeItemImmutabilitypolicy, - ListBlobsIncludeItemLegalhold, - ListBlobsIncludeItemDeletedwithversions, } } type ListContainersIncludeType string const ( - ListContainersIncludeTypeMetadata ListContainersIncludeType = "metadata" ListContainersIncludeTypeDeleted ListContainersIncludeType = "deleted" + ListContainersIncludeTypeMetadata ListContainersIncludeType = "metadata" ListContainersIncludeTypeSystem ListContainersIncludeType = "system" ) // PossibleListContainersIncludeTypeValues returns the possible values for the ListContainersIncludeType const type. func PossibleListContainersIncludeTypeValues() []ListContainersIncludeType { return []ListContainersIncludeType{ - ListContainersIncludeTypeMetadata, ListContainersIncludeTypeDeleted, + ListContainersIncludeTypeMetadata, ListContainersIncludeTypeSystem, } } @@ -404,18 +437,18 @@ func PossiblePublicAccessTypeValues() []PublicAccessType { type QueryFormatType string const ( + QueryFormatTypeArrow QueryFormatType = "arrow" QueryFormatTypeDelimited QueryFormatType = "delimited" QueryFormatTypeJSON QueryFormatType = "json" - QueryFormatTypeArrow QueryFormatType = "arrow" QueryFormatTypeParquet QueryFormatType = "parquet" ) // PossibleQueryFormatTypeValues returns the possible values for the QueryFormatType const type. func PossibleQueryFormatTypeValues() []QueryFormatType { return []QueryFormatType{ + QueryFormatTypeArrow, QueryFormatTypeDelimited, QueryFormatTypeJSON, - QueryFormatTypeArrow, QueryFormatTypeParquet, } } @@ -440,38 +473,38 @@ func PossibleRehydratePriorityValues() []RehydratePriority { type SKUName string const ( - SKUNameStandardLRS SKUName = "Standard_LRS" + SKUNamePremiumLRS SKUName = "Premium_LRS" SKUNameStandardGRS SKUName = "Standard_GRS" + SKUNameStandardLRS SKUName = "Standard_LRS" SKUNameStandardRAGRS SKUName = "Standard_RAGRS" SKUNameStandardZRS SKUName = "Standard_ZRS" - SKUNamePremiumLRS SKUName = "Premium_LRS" ) // PossibleSKUNameValues returns the possible values for the SKUName const type. func PossibleSKUNameValues() []SKUName { return []SKUName{ - SKUNameStandardLRS, + SKUNamePremiumLRS, SKUNameStandardGRS, + SKUNameStandardLRS, SKUNameStandardRAGRS, SKUNameStandardZRS, - SKUNamePremiumLRS, } } type SequenceNumberActionType string const ( + SequenceNumberActionTypeIncrement SequenceNumberActionType = "increment" SequenceNumberActionTypeMax SequenceNumberActionType = "max" SequenceNumberActionTypeUpdate SequenceNumberActionType = "update" - SequenceNumberActionTypeIncrement SequenceNumberActionType = "increment" ) // PossibleSequenceNumberActionTypeValues returns the possible values for the SequenceNumberActionType const type. 
func PossibleSequenceNumberActionTypeValues() []SequenceNumberActionType { return []SequenceNumberActionType{ + SequenceNumberActionTypeIncrement, SequenceNumberActionTypeMax, SequenceNumberActionTypeUpdate, - SequenceNumberActionTypeIncrement, } } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go index 31e671f148499..dbc2a293ec671 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go @@ -3,9 +3,8 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. // Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. package generated @@ -16,6 +15,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "io" "net/http" "strconv" @@ -24,46 +24,42 @@ import ( ) // ContainerClient contains the methods for the Container group. -// Don't use this type directly, use NewContainerClient() instead. +// Don't use this type directly, use a constructor function instead. type ContainerClient struct { + internal *azcore.Client endpoint string - pl runtime.Pipeline -} - -// NewContainerClient creates a new instance of ContainerClient with the specified values. -// endpoint - The URL of the service account, container, or blob that is the target of the desired operation. -// pl - the pipeline used for sending requests and handling responses. -func NewContainerClient(endpoint string, pl runtime.Pipeline) *ContainerClient { - client := &ContainerClient{ - endpoint: endpoint, - pl: pl, - } - return client } // AcquireLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 // to 60 seconds, or can be infinite // If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// options - ContainerClientAcquireLeaseOptions contains the optional parameters for the ContainerClient.AcquireLease method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -func (client *ContainerClient) AcquireLease(ctx context.Context, options *ContainerClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientAcquireLeaseResponse, error) { - req, err := client.acquireLeaseCreateRequest(ctx, options, modifiedAccessConditions) +// +// Generated from API version 2023-11-03 +// - duration - Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite +// lease can be between 15 and 60 seconds. A lease duration cannot be changed using +// renew or change. +// - options - ContainerClientAcquireLeaseOptions contains the optional parameters for the ContainerClient.AcquireLease method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
+func (client *ContainerClient) AcquireLease(ctx context.Context, duration int32, options *ContainerClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientAcquireLeaseResponse, error) { + var err error + req, err := client.acquireLeaseCreateRequest(ctx, duration, options, modifiedAccessConditions) if err != nil { return ContainerClientAcquireLeaseResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientAcquireLeaseResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return ContainerClientAcquireLeaseResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return ContainerClientAcquireLeaseResponse{}, err } - return client.acquireLeaseHandleResponse(resp) + resp, err := client.acquireLeaseHandleResponse(httpResp) + return resp, err } // acquireLeaseCreateRequest creates the AcquireLease request. -func (client *ContainerClient) acquireLeaseCreateRequest(ctx context.Context, options *ContainerClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *ContainerClient) acquireLeaseCreateRequest(ctx context.Context, duration int32, options *ContainerClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -76,19 +72,17 @@ func (client *ContainerClient) acquireLeaseCreateRequest(ctx context.Context, op } req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["x-ms-lease-action"] = []string{"acquire"} - if options != nil && options.Duration != nil { - req.Raw().Header["x-ms-lease-duration"] = []string{strconv.FormatInt(int64(*options.Duration), 10)} - } + req.Raw().Header["x-ms-lease-duration"] = []string{strconv.FormatInt(int64(duration), 10)} if options != nil && options.ProposedLeaseID != nil { req.Raw().Header["x-ms-proposed-lease-id"] = []string{*options.ProposedLeaseID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -99,6 +93,16 @@ func (client *ContainerClient) acquireLeaseCreateRequest(ctx context.Context, op // acquireLeaseHandleResponse handles the AcquireLease response. 
func (client *ContainerClient) acquireLeaseHandleResponse(resp *http.Response) (ContainerClientAcquireLeaseResponse, error) { result := ContainerClientAcquireLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientAcquireLeaseResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -112,44 +116,38 @@ func (client *ContainerClient) acquireLeaseHandleResponse(resp *http.Response) ( if val := resp.Header.Get("x-ms-lease-id"); val != "" { result.LeaseID = &val } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientAcquireLeaseResponse{}, err - } - result.Date = &date - } return result, nil } // BreakLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 // to 60 seconds, or can be infinite // If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// options - ContainerClientBreakLeaseOptions contains the optional parameters for the ContainerClient.BreakLease method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// +// Generated from API version 2023-11-03 +// - options - ContainerClientBreakLeaseOptions contains the optional parameters for the ContainerClient.BreakLease method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *ContainerClient) BreakLease(ctx context.Context, options *ContainerClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientBreakLeaseResponse, error) { + var err error req, err := client.breakLeaseCreateRequest(ctx, options, modifiedAccessConditions) if err != nil { return ContainerClientBreakLeaseResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientBreakLeaseResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return ContainerClientBreakLeaseResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return ContainerClientBreakLeaseResponse{}, err } - return client.breakLeaseHandleResponse(resp) + resp, err := client.breakLeaseHandleResponse(httpResp) + return resp, err } // breakLeaseCreateRequest creates the BreakLease request. 
@@ -170,12 +168,12 @@ func (client *ContainerClient) breakLeaseCreateRequest(ctx context.Context, opti req.Raw().Header["x-ms-lease-break-period"] = []string{strconv.FormatInt(int64(*options.BreakPeriod), 10)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -186,6 +184,16 @@ func (client *ContainerClient) breakLeaseCreateRequest(ctx context.Context, opti // breakLeaseHandleResponse handles the BreakLease response. func (client *ContainerClient) breakLeaseHandleResponse(resp *http.Response) (ContainerClientBreakLeaseResponse, error) { result := ContainerClientBreakLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientBreakLeaseResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -204,48 +212,42 @@ func (client *ContainerClient) breakLeaseHandleResponse(resp *http.Response) (Co } result.LeaseTime = &leaseTime } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientBreakLeaseResponse{}, err - } - result.Date = &date - } return result, nil } // ChangeLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 // to 60 seconds, or can be infinite // If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// leaseID - Specifies the current lease ID on the resource. -// proposedLeaseID - Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed -// lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID -// string formats. -// options - ContainerClientChangeLeaseOptions contains the optional parameters for the ContainerClient.ChangeLease method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// +// Generated from API version 2023-11-03 +// - leaseID - Specifies the current lease ID on the resource. +// - proposedLeaseID - Proposed lease ID, in a GUID string format. 
The Blob service returns 400 (Invalid request) if the proposed +// lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID +// string formats. +// - options - ContainerClientChangeLeaseOptions contains the optional parameters for the ContainerClient.ChangeLease method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *ContainerClient) ChangeLease(ctx context.Context, leaseID string, proposedLeaseID string, options *ContainerClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientChangeLeaseResponse, error) { + var err error req, err := client.changeLeaseCreateRequest(ctx, leaseID, proposedLeaseID, options, modifiedAccessConditions) if err != nil { return ContainerClientChangeLeaseResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientChangeLeaseResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ContainerClientChangeLeaseResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientChangeLeaseResponse{}, err } - return client.changeLeaseHandleResponse(resp) + resp, err := client.changeLeaseHandleResponse(httpResp) + return resp, err } // changeLeaseCreateRequest creates the ChangeLease request. @@ -265,12 +267,12 @@ func (client *ContainerClient) changeLeaseCreateRequest(ctx context.Context, lea req.Raw().Header["x-ms-lease-id"] = []string{leaseID} req.Raw().Header["x-ms-proposed-lease-id"] = []string{proposedLeaseID} if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -281,6 +283,16 @@ func (client *ContainerClient) changeLeaseCreateRequest(ctx context.Context, lea // changeLeaseHandleResponse handles the ChangeLease response. 
func (client *ContainerClient) changeLeaseHandleResponse(resp *http.Response) (ContainerClientChangeLeaseResponse, error) { result := ContainerClientChangeLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientChangeLeaseResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -294,48 +306,42 @@ func (client *ContainerClient) changeLeaseHandleResponse(resp *http.Response) (C if val := resp.Header.Get("x-ms-lease-id"); val != "" { result.LeaseID = &val } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientChangeLeaseResponse{}, err - } - result.Date = &date - } return result, nil } // Create - creates a new container under the specified account. If the container with the same name already exists, the operation // fails // If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// options - ContainerClientCreateOptions contains the optional parameters for the ContainerClient.Create method. -// ContainerCpkScopeInfo - ContainerCpkScopeInfo contains a group of parameters for the ContainerClient.Create method. -func (client *ContainerClient) Create(ctx context.Context, options *ContainerClientCreateOptions, containerCpkScopeInfo *ContainerCpkScopeInfo) (ContainerClientCreateResponse, error) { - req, err := client.createCreateRequest(ctx, options, containerCpkScopeInfo) +// +// Generated from API version 2023-11-03 +// - options - ContainerClientCreateOptions contains the optional parameters for the ContainerClient.Create method. +// - ContainerCPKScopeInfo - ContainerCPKScopeInfo contains a group of parameters for the ContainerClient.Create method. +func (client *ContainerClient) Create(ctx context.Context, options *ContainerClientCreateOptions, containerCPKScopeInfo *ContainerCPKScopeInfo) (ContainerClientCreateResponse, error) { + var err error + req, err := client.createCreateRequest(ctx, options, containerCPKScopeInfo) if err != nil { return ContainerClientCreateResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientCreateResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return ContainerClientCreateResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return ContainerClientCreateResponse{}, err } - return client.createHandleResponse(resp) + resp, err := client.createHandleResponse(httpResp) + return resp, err } // createCreateRequest creates the Create request. 
-func (client *ContainerClient) createCreateRequest(ctx context.Context, options *ContainerClientCreateOptions, containerCpkScopeInfo *ContainerCpkScopeInfo) (*policy.Request, error) { +func (client *ContainerClient) createCreateRequest(ctx context.Context, options *ContainerClientCreateOptions, containerCPKScopeInfo *ContainerCPKScopeInfo) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -348,21 +354,23 @@ func (client *ContainerClient) createCreateRequest(ctx context.Context, options req.Raw().URL.RawQuery = reqQP.Encode() if options != nil && options.Metadata != nil { for k, v := range options.Metadata { - req.Raw().Header["x-ms-meta-"+k] = []string{v} + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } } } if options != nil && options.Access != nil { req.Raw().Header["x-ms-blob-public-access"] = []string{string(*options.Access)} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - if containerCpkScopeInfo != nil && containerCpkScopeInfo.DefaultEncryptionScope != nil { - req.Raw().Header["x-ms-default-encryption-scope"] = []string{*containerCpkScopeInfo.DefaultEncryptionScope} + if containerCPKScopeInfo != nil && containerCPKScopeInfo.DefaultEncryptionScope != nil { + req.Raw().Header["x-ms-default-encryption-scope"] = []string{*containerCPKScopeInfo.DefaultEncryptionScope} } - if containerCpkScopeInfo != nil && containerCpkScopeInfo.PreventEncryptionScopeOverride != nil { - req.Raw().Header["x-ms-deny-encryption-scope-override"] = []string{strconv.FormatBool(*containerCpkScopeInfo.PreventEncryptionScopeOverride)} + if containerCPKScopeInfo != nil && containerCPKScopeInfo.PreventEncryptionScopeOverride != nil { + req.Raw().Header["x-ms-deny-encryption-scope-override"] = []string{strconv.FormatBool(*containerCPKScopeInfo.PreventEncryptionScopeOverride)} } req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil @@ -371,6 +379,16 @@ func (client *ContainerClient) createCreateRequest(ctx context.Context, options // createHandleResponse handles the Create response. func (client *ContainerClient) createHandleResponse(resp *http.Response) (ContainerClientCreateResponse, error) { result := ContainerClientCreateResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientCreateResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -381,45 +399,39 @@ func (client *ContainerClient) createHandleResponse(resp *http.Response) (Contai } result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientCreateResponse{}, err - } - result.Date = &date - } return result, nil } // Delete - operation marks the specified container for deletion. 
The container and any blobs contained within it are later // deleted during garbage collection // If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// options - ContainerClientDeleteOptions contains the optional parameters for the ContainerClient.Delete method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// +// Generated from API version 2023-11-03 +// - options - ContainerClientDeleteOptions contains the optional parameters for the ContainerClient.Delete method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *ContainerClient) Delete(ctx context.Context, options *ContainerClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientDeleteResponse, error) { + var err error req, err := client.deleteCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) if err != nil { return ContainerClientDeleteResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientDeleteResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return ContainerClientDeleteResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return ContainerClientDeleteResponse{}, err } - return client.deleteHandleResponse(resp) + resp, err := client.deleteHandleResponse(httpResp) + return resp, err } // deleteCreateRequest creates the Delete request. 
@@ -438,12 +450,12 @@ func (client *ContainerClient) deleteCreateRequest(ctx context.Context, options req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -457,42 +469,127 @@ func (client *ContainerClient) deleteHandleResponse(resp *http.Response) (Contai if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientDeleteResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } + return result, nil +} + +// FilterBlobs - The Filter Blobs operation enables callers to list blobs in a container whose tags match a given search expression. +// Filter blobs searches within the given container. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-11-03 +// - where - Filters the results to return only to return only blobs whose tags match the specified expression. +// - options - ContainerClientFilterBlobsOptions contains the optional parameters for the ContainerClient.FilterBlobs method. +func (client *ContainerClient) FilterBlobs(ctx context.Context, where string, options *ContainerClientFilterBlobsOptions) (ContainerClientFilterBlobsResponse, error) { + var err error + req, err := client.filterBlobsCreateRequest(ctx, where, options) + if err != nil { + return ContainerClientFilterBlobsResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ContainerClientFilterBlobsResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientFilterBlobsResponse{}, err + } + resp, err := client.filterBlobsHandleResponse(httpResp) + return resp, err +} + +// filterBlobsCreateRequest creates the FilterBlobs request. 
+func (client *ContainerClient) filterBlobsCreateRequest(ctx context.Context, where string, options *ContainerClientFilterBlobsOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "container") + reqQP.Set("comp", "blobs") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + reqQP.Set("where", where) + if options != nil && options.Marker != nil { + reqQP.Set("marker", *options.Marker) + } + if options != nil && options.Maxresults != nil { + reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) + } + if options != nil && options.Include != nil { + reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ",")) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// filterBlobsHandleResponse handles the FilterBlobs response. +func (client *ContainerClient) filterBlobsHandleResponse(resp *http.Response) (ContainerClientFilterBlobsResponse, error) { + result := ContainerClientFilterBlobsResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return ContainerClientDeleteResponse{}, err + return ContainerClientFilterBlobsResponse{}, err } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if err := runtime.UnmarshalAsXML(resp, &result.FilterBlobSegment); err != nil { + return ContainerClientFilterBlobsResponse{}, err + } return result, nil } // GetAccessPolicy - gets the permissions for the specified container. The permissions indicate whether container data may // be accessed publicly. // If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// options - ContainerClientGetAccessPolicyOptions contains the optional parameters for the ContainerClient.GetAccessPolicy -// method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// +// Generated from API version 2023-11-03 +// - options - ContainerClientGetAccessPolicyOptions contains the optional parameters for the ContainerClient.GetAccessPolicy +// method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. 
func (client *ContainerClient) GetAccessPolicy(ctx context.Context, options *ContainerClientGetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions) (ContainerClientGetAccessPolicyResponse, error) { + var err error req, err := client.getAccessPolicyCreateRequest(ctx, options, leaseAccessConditions) if err != nil { return ContainerClientGetAccessPolicyResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientGetAccessPolicyResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ContainerClientGetAccessPolicyResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientGetAccessPolicyResponse{}, err } - return client.getAccessPolicyHandleResponse(resp) + resp, err := client.getAccessPolicyHandleResponse(httpResp) + return resp, err } // getAccessPolicyCreateRequest creates the GetAccessPolicy request. @@ -511,7 +608,7 @@ func (client *ContainerClient) getAccessPolicyCreateRequest(ctx context.Context, if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -525,6 +622,16 @@ func (client *ContainerClient) getAccessPolicyHandleResponse(resp *http.Response if val := resp.Header.Get("x-ms-blob-public-access"); val != "" { result.BlobPublicAccess = (*PublicAccessType)(&val) } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientGetAccessPolicyResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -535,22 +642,12 @@ func (client *ContainerClient) getAccessPolicyHandleResponse(resp *http.Response } result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientGetAccessPolicyResponse{}, err - } - result.Date = &date - } if err := runtime.UnmarshalAsXML(resp, &result); err != nil { return ContainerClientGetAccessPolicyResponse{}, err } @@ -559,22 +656,26 @@ func (client *ContainerClient) getAccessPolicyHandleResponse(resp *http.Response // GetAccountInfo - Returns the sku name and account kind // If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// options - ContainerClientGetAccountInfoOptions contains the optional parameters for the ContainerClient.GetAccountInfo -// method. +// +// Generated from API version 2023-11-03 +// - options - ContainerClientGetAccountInfoOptions contains the optional parameters for the ContainerClient.GetAccountInfo +// method. 
func (client *ContainerClient) GetAccountInfo(ctx context.Context, options *ContainerClientGetAccountInfoOptions) (ContainerClientGetAccountInfoResponse, error) { + var err error req, err := client.getAccountInfoCreateRequest(ctx, options) if err != nil { return ContainerClientGetAccountInfoResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientGetAccountInfoResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ContainerClientGetAccountInfoResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientGetAccountInfoResponse{}, err } - return client.getAccountInfoHandleResponse(resp) + resp, err := client.getAccountInfoHandleResponse(httpResp) + return resp, err } // getAccountInfoCreateRequest creates the GetAccountInfo request. @@ -587,7 +688,7 @@ func (client *ContainerClient) getAccountInfoCreateRequest(ctx context.Context, reqQP.Set("restype", "account") reqQP.Set("comp", "properties") req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } @@ -595,15 +696,12 @@ func (client *ContainerClient) getAccountInfoCreateRequest(ctx context.Context, // getAccountInfoHandleResponse handles the GetAccountInfo response. func (client *ContainerClient) getAccountInfoHandleResponse(resp *http.Response) (ContainerClientGetAccountInfoResponse, error) { result := ContainerClientGetAccountInfoResponse{} + if val := resp.Header.Get("x-ms-account-kind"); val != "" { + result.AccountKind = (*AccountKind)(&val) + } if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -611,11 +709,14 @@ func (client *ContainerClient) getAccountInfoHandleResponse(resp *http.Response) } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } if val := resp.Header.Get("x-ms-sku-name"); val != "" { result.SKUName = (*SKUName)(&val) } - if val := resp.Header.Get("x-ms-account-kind"); val != "" { - result.AccountKind = (*AccountKind)(&val) + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val } return result, nil } @@ -623,22 +724,26 @@ func (client *ContainerClient) getAccountInfoHandleResponse(resp *http.Response) // GetProperties - returns all user-defined metadata and system properties for the specified container. The data returned // does not include the container's list of blobs // If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// options - ContainerClientGetPropertiesOptions contains the optional parameters for the ContainerClient.GetProperties method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// +// Generated from API version 2023-11-03 +// - options - ContainerClientGetPropertiesOptions contains the optional parameters for the ContainerClient.GetProperties method. 
+// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. func (client *ContainerClient) GetProperties(ctx context.Context, options *ContainerClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions) (ContainerClientGetPropertiesResponse, error) { + var err error req, err := client.getPropertiesCreateRequest(ctx, options, leaseAccessConditions) if err != nil { return ContainerClientGetPropertiesResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientGetPropertiesResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ContainerClientGetPropertiesResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientGetPropertiesResponse{}, err } - return client.getPropertiesHandleResponse(resp) + resp, err := client.getPropertiesHandleResponse(httpResp) + return resp, err } // getPropertiesCreateRequest creates the GetProperties request. @@ -656,7 +761,7 @@ func (client *ContainerClient) getPropertiesCreateRequest(ctx context.Context, o if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -667,42 +772,12 @@ func (client *ContainerClient) getPropertiesCreateRequest(ctx context.Context, o // getPropertiesHandleResponse handles the GetProperties response. 
func (client *ContainerClient) getPropertiesHandleResponse(resp *http.Response) (ContainerClientGetPropertiesResponse, error) { result := ContainerClientGetPropertiesResponse{} - for hh := range resp.Header { - if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { - if result.Metadata == nil { - result.Metadata = map[string]string{} - } - result.Metadata[hh[len("x-ms-meta-"):]] = resp.Header.Get(hh) - } - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientGetPropertiesResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("x-ms-lease-duration"); val != "" { - result.LeaseDuration = (*LeaseDurationType)(&val) - } - if val := resp.Header.Get("x-ms-lease-state"); val != "" { - result.LeaseState = (*LeaseStateType)(&val) - } - if val := resp.Header.Get("x-ms-lease-status"); val != "" { - result.LeaseStatus = (*LeaseStatusType)(&val) + if val := resp.Header.Get("x-ms-blob-public-access"); val != "" { + result.BlobPublicAccess = (*PublicAccessType)(&val) } if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -710,8 +785,18 @@ func (client *ContainerClient) getPropertiesHandleResponse(resp *http.Response) } result.Date = &date } - if val := resp.Header.Get("x-ms-blob-public-access"); val != "" { - result.BlobPublicAccess = (*PublicAccessType)(&val) + if val := resp.Header.Get("x-ms-default-encryption-scope"); val != "" { + result.DefaultEncryptionScope = &val + } + if val := resp.Header.Get("x-ms-deny-encryption-scope-override"); val != "" { + denyEncryptionScopeOverride, err := strconv.ParseBool(val) + if err != nil { + return ContainerClientGetPropertiesResponse{}, err + } + result.DenyEncryptionScopeOverride = &denyEncryptionScopeOverride + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("x-ms-has-immutability-policy"); val != "" { hasImmutabilityPolicy, err := strconv.ParseBool(val) @@ -727,31 +812,52 @@ func (client *ContainerClient) getPropertiesHandleResponse(resp *http.Response) } result.HasLegalHold = &hasLegalHold } - if val := resp.Header.Get("x-ms-default-encryption-scope"); val != "" { - result.DefaultEncryptionScope = &val - } - if val := resp.Header.Get("x-ms-deny-encryption-scope-override"); val != "" { - denyEncryptionScopeOverride, err := strconv.ParseBool(val) + if val := resp.Header.Get("x-ms-immutable-storage-with-versioning-enabled"); val != "" { + isImmutableStorageWithVersioningEnabled, err := strconv.ParseBool(val) if err != nil { return ContainerClientGetPropertiesResponse{}, err } - result.DenyEncryptionScopeOverride = &denyEncryptionScopeOverride + result.IsImmutableStorageWithVersioningEnabled = &isImmutableStorageWithVersioningEnabled } - if val := resp.Header.Get("x-ms-immutable-storage-with-versioning-enabled"); val != "" { - isImmutableStorageWithVersioningEnabled, err := strconv.ParseBool(val) + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) if err != 
nil { return ContainerClientGetPropertiesResponse{}, err } - result.IsImmutableStorageWithVersioningEnabled = &isImmutableStorageWithVersioningEnabled + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-duration"); val != "" { + result.LeaseDuration = (*LeaseDurationType)(&val) + } + if val := resp.Header.Get("x-ms-lease-state"); val != "" { + result.LeaseState = (*LeaseStateType)(&val) + } + if val := resp.Header.Get("x-ms-lease-status"); val != "" { + result.LeaseStatus = (*LeaseStatusType)(&val) + } + for hh := range resp.Header { + if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { + if result.Metadata == nil { + result.Metadata = map[string]*string{} + } + result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh)) + } + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val } return result, nil } // NewListBlobFlatSegmentPager - [Update] The List Blobs operation returns a list of the blobs under the specified container -// If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// options - ContainerClientListBlobFlatSegmentOptions contains the optional parameters for the ContainerClient.ListBlobFlatSegment -// method. +// +// Generated from API version 2023-11-03 +// - options - ContainerClientListBlobFlatSegmentOptions contains the optional parameters for the ContainerClient.NewListBlobFlatSegmentPager +// method. +// // listBlobFlatSegmentCreateRequest creates the ListBlobFlatSegment request. func (client *ContainerClient) ListBlobFlatSegmentCreateRequest(ctx context.Context, options *ContainerClientListBlobFlatSegmentOptions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) @@ -777,7 +883,7 @@ func (client *ContainerClient) ListBlobFlatSegmentCreateRequest(ctx context.Cont reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -788,17 +894,11 @@ func (client *ContainerClient) ListBlobFlatSegmentCreateRequest(ctx context.Cont // listBlobFlatSegmentHandleResponse handles the ListBlobFlatSegment response. 
func (client *ContainerClient) ListBlobFlatSegmentHandleResponse(resp *http.Response) (ContainerClientListBlobFlatSegmentResponse, error) { result := ContainerClientListBlobFlatSegmentResponse{} - if val := resp.Header.Get("Content-Type"); val != "" { - result.ContentType = &val - } if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) @@ -807,6 +907,12 @@ func (client *ContainerClient) ListBlobFlatSegmentHandleResponse(resp *http.Resp } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } if err := runtime.UnmarshalAsXML(resp, &result.ListBlobsFlatSegmentResponse); err != nil { return ContainerClientListBlobFlatSegmentResponse{}, err } @@ -814,36 +920,29 @@ func (client *ContainerClient) ListBlobFlatSegmentHandleResponse(resp *http.Resp } // NewListBlobHierarchySegmentPager - [Update] The List Blobs operation returns a list of the blobs under the specified container -// If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// delimiter - When the request includes this parameter, the operation returns a BlobPrefix element in the response body that -// acts as a placeholder for all blobs whose names begin with the same substring up to the -// appearance of the delimiter character. The delimiter may be a single character or a string. -// options - ContainerClientListBlobHierarchySegmentOptions contains the optional parameters for the ContainerClient.ListBlobHierarchySegment -// method. +// +// Generated from API version 2023-11-03 +// - delimiter - When the request includes this parameter, the operation returns a BlobPrefix element in the response body that +// acts as a placeholder for all blobs whose names begin with the same substring up to the +// appearance of the delimiter character. The delimiter may be a single character or a string. +// - options - ContainerClientListBlobHierarchySegmentOptions contains the optional parameters for the ContainerClient.NewListBlobHierarchySegmentPager +// method. 
func (client *ContainerClient) NewListBlobHierarchySegmentPager(delimiter string, options *ContainerClientListBlobHierarchySegmentOptions) *runtime.Pager[ContainerClientListBlobHierarchySegmentResponse] { return runtime.NewPager(runtime.PagingHandler[ContainerClientListBlobHierarchySegmentResponse]{ More: func(page ContainerClientListBlobHierarchySegmentResponse) bool { return page.NextMarker != nil && len(*page.NextMarker) > 0 }, Fetcher: func(ctx context.Context, page *ContainerClientListBlobHierarchySegmentResponse) (ContainerClientListBlobHierarchySegmentResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.ListBlobHierarchySegmentCreateRequest(ctx, delimiter, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextMarker) + nextLink := "" + if page != nil { + nextLink = *page.NextMarker } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.ListBlobHierarchySegmentCreateRequest(ctx, delimiter, options) + }, nil) if err != nil { return ContainerClientListBlobHierarchySegmentResponse{}, err } - resp, err := client.pl.Do(req) - if err != nil { - return ContainerClientListBlobHierarchySegmentResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ContainerClientListBlobHierarchySegmentResponse{}, runtime.NewResponseError(resp) - } return client.ListBlobHierarchySegmentHandleResponse(resp) }, }) @@ -875,7 +974,7 @@ func (client *ContainerClient) ListBlobHierarchySegmentCreateRequest(ctx context reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -886,17 +985,11 @@ func (client *ContainerClient) ListBlobHierarchySegmentCreateRequest(ctx context // ListBlobHierarchySegmentHandleResponse handles the ListBlobHierarchySegment response. 
func (client *ContainerClient) ListBlobHierarchySegmentHandleResponse(resp *http.Response) (ContainerClientListBlobHierarchySegmentResponse, error) { result := ContainerClientListBlobHierarchySegmentResponse{} - if val := resp.Header.Get("Content-Type"); val != "" { - result.ContentType = &val - } if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) @@ -905,6 +998,12 @@ func (client *ContainerClient) ListBlobHierarchySegmentHandleResponse(resp *http } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } if err := runtime.UnmarshalAsXML(resp, &result.ListBlobsHierarchySegmentResponse); err != nil { return ContainerClientListBlobHierarchySegmentResponse{}, err } @@ -914,23 +1013,27 @@ func (client *ContainerClient) ListBlobHierarchySegmentHandleResponse(resp *http // ReleaseLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 // to 60 seconds, or can be infinite // If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// leaseID - Specifies the current lease ID on the resource. -// options - ContainerClientReleaseLeaseOptions contains the optional parameters for the ContainerClient.ReleaseLease method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// +// Generated from API version 2023-11-03 +// - leaseID - Specifies the current lease ID on the resource. +// - options - ContainerClientReleaseLeaseOptions contains the optional parameters for the ContainerClient.ReleaseLease method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *ContainerClient) ReleaseLease(ctx context.Context, leaseID string, options *ContainerClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientReleaseLeaseResponse, error) { + var err error req, err := client.releaseLeaseCreateRequest(ctx, leaseID, options, modifiedAccessConditions) if err != nil { return ContainerClientReleaseLeaseResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientReleaseLeaseResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ContainerClientReleaseLeaseResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientReleaseLeaseResponse{}, err } - return client.releaseLeaseHandleResponse(resp) + resp, err := client.releaseLeaseHandleResponse(httpResp) + return resp, err } // releaseLeaseCreateRequest creates the ReleaseLease request. 
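The regenerated methods above drop the private pl pipeline field in favour of client.internal.Pipeline() and surface unexpected status codes through runtime.NewResponseError, so callers see an *azcore.ResponseError exactly as the regenerated doc comments state. A minimal sketch of how calling code can inspect that error (the package name and helper below are illustrative, not part of the vendored code):

package azblobexample

import (
	"errors"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
)

// inspectStorageError unwraps the *azcore.ResponseError returned by the
// regenerated container operations when the service answers with an
// unexpected status code.
func inspectStorageError(err error) {
	var respErr *azcore.ResponseError
	if errors.As(err, &respErr) {
		fmt.Printf("storage call failed: status=%d code=%s\n", respErr.StatusCode, respErr.ErrorCode)
	}
}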
@@ -949,12 +1052,12 @@ func (client *ContainerClient) releaseLeaseCreateRequest(ctx context.Context, le req.Raw().Header["x-ms-lease-action"] = []string{"release"} req.Raw().Header["x-ms-lease-id"] = []string{leaseID} if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -965,6 +1068,16 @@ func (client *ContainerClient) releaseLeaseCreateRequest(ctx context.Context, le // releaseLeaseHandleResponse handles the ReleaseLease response. func (client *ContainerClient) releaseLeaseHandleResponse(resp *http.Response) (ContainerClientReleaseLeaseResponse, error) { result := ContainerClientReleaseLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientReleaseLeaseResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -975,43 +1088,37 @@ func (client *ContainerClient) releaseLeaseHandleResponse(resp *http.Response) ( } result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientReleaseLeaseResponse{}, err - } - result.Date = &date - } return result, nil } // Rename - Renames an existing container. // If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// sourceContainerName - Required. Specifies the name of the container to rename. -// options - ContainerClientRenameOptions contains the optional parameters for the ContainerClient.Rename method. +// +// Generated from API version 2023-11-03 +// - sourceContainerName - Required. Specifies the name of the container to rename. +// - options - ContainerClientRenameOptions contains the optional parameters for the ContainerClient.Rename method. 
func (client *ContainerClient) Rename(ctx context.Context, sourceContainerName string, options *ContainerClientRenameOptions) (ContainerClientRenameResponse, error) { + var err error req, err := client.renameCreateRequest(ctx, sourceContainerName, options) if err != nil { return ContainerClientRenameResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientRenameResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ContainerClientRenameResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientRenameResponse{}, err } - return client.renameHandleResponse(resp) + resp, err := client.renameHandleResponse(httpResp) + return resp, err } // renameCreateRequest creates the Rename request. @@ -1027,7 +1134,7 @@ func (client *ContainerClient) renameCreateRequest(ctx context.Context, sourceCo reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -1045,12 +1152,6 @@ func (client *ContainerClient) renameHandleResponse(resp *http.Response) (Contai if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -1058,29 +1159,39 @@ func (client *ContainerClient) renameHandleResponse(resp *http.Response) (Contai } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } // RenewLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 // to 60 seconds, or can be infinite // If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// leaseID - Specifies the current lease ID on the resource. -// options - ContainerClientRenewLeaseOptions contains the optional parameters for the ContainerClient.RenewLease method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// +// Generated from API version 2023-11-03 +// - leaseID - Specifies the current lease ID on the resource. +// - options - ContainerClientRenewLeaseOptions contains the optional parameters for the ContainerClient.RenewLease method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
func (client *ContainerClient) RenewLease(ctx context.Context, leaseID string, options *ContainerClientRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientRenewLeaseResponse, error) { + var err error req, err := client.renewLeaseCreateRequest(ctx, leaseID, options, modifiedAccessConditions) if err != nil { return ContainerClientRenewLeaseResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientRenewLeaseResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ContainerClientRenewLeaseResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientRenewLeaseResponse{}, err } - return client.renewLeaseHandleResponse(resp) + resp, err := client.renewLeaseHandleResponse(httpResp) + return resp, err } // renewLeaseCreateRequest creates the RenewLease request. @@ -1099,12 +1210,12 @@ func (client *ContainerClient) renewLeaseCreateRequest(ctx context.Context, leas req.Raw().Header["x-ms-lease-action"] = []string{"renew"} req.Raw().Header["x-ms-lease-id"] = []string{leaseID} if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -1115,6 +1226,16 @@ func (client *ContainerClient) renewLeaseCreateRequest(ctx context.Context, leas // renewLeaseHandleResponse handles the RenewLease response. 
func (client *ContainerClient) renewLeaseHandleResponse(resp *http.Response) (ContainerClientRenewLeaseResponse, error) { result := ContainerClientRenewLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientRenewLeaseResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -1128,42 +1249,36 @@ func (client *ContainerClient) renewLeaseHandleResponse(resp *http.Response) (Co if val := resp.Header.Get("x-ms-lease-id"); val != "" { result.LeaseID = &val } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientRenewLeaseResponse{}, err - } - result.Date = &date - } return result, nil } // Restore - Restores a previously-deleted container. // If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// options - ContainerClientRestoreOptions contains the optional parameters for the ContainerClient.Restore method. +// +// Generated from API version 2023-11-03 +// - options - ContainerClientRestoreOptions contains the optional parameters for the ContainerClient.Restore method. func (client *ContainerClient) Restore(ctx context.Context, options *ContainerClientRestoreOptions) (ContainerClientRestoreResponse, error) { + var err error req, err := client.restoreCreateRequest(ctx, options) if err != nil { return ContainerClientRestoreResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientRestoreResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return ContainerClientRestoreResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return ContainerClientRestoreResponse{}, err } - return client.restoreHandleResponse(resp) + resp, err := client.restoreHandleResponse(httpResp) + return resp, err } // restoreCreateRequest creates the Restore request. 
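Every request builder in these hunks swaps the hard-coded "2020-10-02" x-ms-version value for the shared ServiceVersion constant, so a future regeneration only has to touch one declaration. A minimal sketch of that pattern, with the constant's location and exact value assumed from the "Generated from API version 2023-11-03" comments above:

package generated

// ServiceVersion is the single x-ms-version value stamped onto every
// generated request, keeping the header in sync with the API version the
// code was generated from.
const ServiceVersion = "2023-11-03"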
@@ -1179,7 +1294,7 @@ func (client *ContainerClient) restoreCreateRequest(ctx context.Context, options reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -1199,12 +1314,6 @@ func (client *ContainerClient) restoreHandleResponse(resp *http.Response) (Conta if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -1212,31 +1321,41 @@ func (client *ContainerClient) restoreHandleResponse(resp *http.Response) (Conta } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } // SetAccessPolicy - sets the permissions for the specified container. The permissions indicate whether blobs in a container // may be accessed publicly. // If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// containerACL - the acls for the container -// options - ContainerClientSetAccessPolicyOptions contains the optional parameters for the ContainerClient.SetAccessPolicy -// method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// +// Generated from API version 2023-11-03 +// - containerACL - the acls for the container +// - options - ContainerClientSetAccessPolicyOptions contains the optional parameters for the ContainerClient.SetAccessPolicy +// method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
func (client *ContainerClient) SetAccessPolicy(ctx context.Context, containerACL []*SignedIdentifier, options *ContainerClientSetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientSetAccessPolicyResponse, error) { + var err error req, err := client.setAccessPolicyCreateRequest(ctx, containerACL, options, leaseAccessConditions, modifiedAccessConditions) if err != nil { return ContainerClientSetAccessPolicyResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientSetAccessPolicyResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ContainerClientSetAccessPolicyResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientSetAccessPolicyResponse{}, err } - return client.setAccessPolicyHandleResponse(resp) + resp, err := client.setAccessPolicyHandleResponse(httpResp) + return resp, err } // setAccessPolicyCreateRequest creates the SetAccessPolicy request. @@ -1259,12 +1378,12 @@ func (client *ContainerClient) setAccessPolicyCreateRequest(ctx context.Context, req.Raw().Header["x-ms-blob-public-access"] = []string{string(*options.Access)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -1273,12 +1392,25 @@ func (client *ContainerClient) setAccessPolicyCreateRequest(ctx context.Context, XMLName xml.Name `xml:"SignedIdentifiers"` ContainerACL *[]*SignedIdentifier `xml:"SignedIdentifier"` } - return req, runtime.MarshalAsXML(req, wrapper{ContainerACL: &containerACL}) + if err := runtime.MarshalAsXML(req, wrapper{ContainerACL: &containerACL}); err != nil { + return nil, err + } + return req, nil } // setAccessPolicyHandleResponse handles the SetAccessPolicy response. 
func (client *ContainerClient) setAccessPolicyHandleResponse(resp *http.Response) (ContainerClientSetAccessPolicyResponse, error) { result := ContainerClientSetAccessPolicyResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientSetAccessPolicyResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -1289,44 +1421,38 @@ func (client *ContainerClient) setAccessPolicyHandleResponse(resp *http.Response } result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientSetAccessPolicyResponse{}, err - } - result.Date = &date - } return result, nil } // SetMetadata - operation sets one or more user-defined name-value pairs for the specified container. // If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// options - ContainerClientSetMetadataOptions contains the optional parameters for the ContainerClient.SetMetadata method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// +// Generated from API version 2023-11-03 +// - options - ContainerClientSetMetadataOptions contains the optional parameters for the ContainerClient.SetMetadata method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *ContainerClient) SetMetadata(ctx context.Context, options *ContainerClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientSetMetadataResponse, error) { + var err error req, err := client.setMetadataCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) if err != nil { return ContainerClientSetMetadataResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientSetMetadataResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ContainerClientSetMetadataResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientSetMetadataResponse{}, err } - return client.setMetadataHandleResponse(resp) + resp, err := client.setMetadataHandleResponse(httpResp) + return resp, err } // setMetadataCreateRequest creates the SetMetadata request. 
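The If-Modified-Since / If-Unmodified-Since headers above are now converted to GMT before RFC1123 formatting, so the rendered date ends in "GMT" as the HTTP date grammar requires; formatting a local time directly could leak a different zone abbreviation. A minimal sketch of that pattern, assuming gmt is a package-level location along the lines of time.FixedZone("GMT", 0):

package generated

import (
	"net/http"
	"time"
)

// gmt approximates the fixed zone the request builders use when writing
// conditional-access date headers.
var gmt = time.FixedZone("GMT", 0)

// setIfModifiedSince writes the header only when a value was supplied,
// normalizing it to GMT so the RFC1123 rendering is a valid HTTP-date.
func setIfModifiedSince(h http.Header, t *time.Time) {
	if t == nil {
		return
	}
	h["If-Modified-Since"] = []string{t.In(gmt).Format(time.RFC1123)}
}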
@@ -1347,13 +1473,15 @@ func (client *ContainerClient) setMetadataCreateRequest(ctx context.Context, opt } if options != nil && options.Metadata != nil { for k, v := range options.Metadata { - req.Raw().Header["x-ms-meta-"+k] = []string{v} + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } } } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -1364,6 +1492,16 @@ func (client *ContainerClient) setMetadataCreateRequest(ctx context.Context, opt // setMetadataHandleResponse handles the SetMetadata response. func (client *ContainerClient) setMetadataHandleResponse(resp *http.Response) (ContainerClientSetMetadataResponse, error) { result := ContainerClientSetMetadataResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientSetMetadataResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -1374,46 +1512,40 @@ func (client *ContainerClient) setMetadataHandleResponse(resp *http.Response) (C } result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientSetMetadataResponse{}, err - } - result.Date = &date - } return result, nil } // SubmitBatch - The Batch operation allows multiple API calls to be embedded into a single HTTP request. // If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// contentLength - The length of the request. -// multipartContentType - Required. The value of this header must be multipart/mixed with a batch boundary. Example header -// value: multipart/mixed; boundary=batch_ -// body - Initial data -// options - ContainerClientSubmitBatchOptions contains the optional parameters for the ContainerClient.SubmitBatch method. +// +// Generated from API version 2023-11-03 +// - contentLength - The length of the request. +// - multipartContentType - Required. The value of this header must be multipart/mixed with a batch boundary. Example header +// value: multipart/mixed; boundary=batch_ +// - body - Initial data +// - options - ContainerClientSubmitBatchOptions contains the optional parameters for the ContainerClient.SubmitBatch method. 
func (client *ContainerClient) SubmitBatch(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *ContainerClientSubmitBatchOptions) (ContainerClientSubmitBatchResponse, error) { + var err error req, err := client.submitBatchCreateRequest(ctx, contentLength, multipartContentType, body, options) if err != nil { return ContainerClientSubmitBatchResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientSubmitBatchResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return ContainerClientSubmitBatchResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return ContainerClientSubmitBatchResponse{}, err } - return client.submitBatchHandleResponse(resp) + resp, err := client.submitBatchHandleResponse(httpResp) + return resp, err } // submitBatchCreateRequest creates the SubmitBatch request. @@ -1432,12 +1564,15 @@ func (client *ContainerClient) submitBatchCreateRequest(ctx context.Context, con runtime.SkipBodyDownload(req) req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} req.Raw().Header["Content-Type"] = []string{multipartContentType} - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } req.Raw().Header["Accept"] = []string{"application/xml"} - return req, req.SetBody(body, "application/xml") + if err := req.SetBody(body, multipartContentType); err != nil { + return nil, err + } + return req, nil } // submitBatchHandleResponse handles the SubmitBatch response. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go index f8df7338b2f85..7251de83952bb 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go @@ -3,9 +3,8 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. // Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. package generated @@ -26,89 +25,6 @@ type AccessPolicy struct { Start *time.Time `xml:"Start"` } -// AppendBlobClientAppendBlockFromURLOptions contains the optional parameters for the AppendBlobClient.AppendBlockFromURL -// method. -type AppendBlobClientAppendBlockFromURLOptions struct { - // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. - CopySourceAuthorization *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // Specify the md5 calculated for the range of bytes that must be read from the copy source. - SourceContentMD5 []byte - // Specify the crc64 calculated for the range of bytes that must be read from the copy source. 
- SourceContentcrc64 []byte - // Bytes of source data in the specified range. - SourceRange *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // Specify the transactional md5 for the body, to be validated by the service. - TransactionalContentMD5 []byte -} - -// AppendBlobClientAppendBlockOptions contains the optional parameters for the AppendBlobClient.AppendBlock method. -type AppendBlobClientAppendBlockOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // Specify the transactional crc64 for the body, to be validated by the service. - TransactionalContentCRC64 []byte - // Specify the transactional md5 for the body, to be validated by the service. - TransactionalContentMD5 []byte -} - -// AppendBlobClientCreateOptions contains the optional parameters for the AppendBlobClient.Create method. -type AppendBlobClientCreateOptions struct { - // Optional. Used to set blob tags in various blob operations. - BlobTagsString *string - // Specifies the date time when the blobs immutability policy is set to expire. - ImmutabilityPolicyExpiry *time.Time - // Specifies the immutability policy mode to set on the blob. - ImmutabilityPolicyMode *ImmutabilityPolicySetting - // Specified if a legal hold should be set on the blob. - LegalHold *bool - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// AppendBlobClientSealOptions contains the optional parameters for the AppendBlobClient.Seal method. -type AppendBlobClientSealOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
- // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// AppendPositionAccessConditions contains a group of parameters for the AppendBlobClient.AppendBlock method. -type AppendPositionAccessConditions struct { - // Optional conditional header, used only for the Append Block operation. A number indicating the byte offset to compare. - // Append Block will succeed only if the append position is equal to this number. If - // it is not, the request will fail with the AppendPositionConditionNotMet error (HTTP status code 412 - Precondition Failed). - AppendPosition *int64 - // Optional conditional header. The max length in bytes permitted for the append blob. If the Append Block operation would - // cause the blob to exceed that limit or if the blob size is already greater than - // the value specified in this header, the request will fail with MaxBlobSizeConditionNotMet error (HTTP status code 412 - - // Precondition Failed). - MaxSize *int64 -} - // ArrowConfiguration - Groups the settings used for formatting the response if the response should be Arrow formatted. type ArrowConfiguration struct { // REQUIRED @@ -124,415 +40,19 @@ type ArrowField struct { Scale *int32 `xml:"Scale"` } -// BlobClientAbortCopyFromURLOptions contains the optional parameters for the BlobClient.AbortCopyFromURL method. -type BlobClientAbortCopyFromURLOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientAcquireLeaseOptions contains the optional parameters for the BlobClient.AcquireLease method. -type BlobClientAcquireLeaseOptions struct { - // Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite lease - // can be between 15 and 60 seconds. A lease duration cannot be changed using - // renew or change. - Duration *int32 - // Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is - // not in the correct format. See Guid Constructor (String) for a list of valid GUID - // string formats. - ProposedLeaseID *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientBreakLeaseOptions contains the optional parameters for the BlobClient.BreakLease method. -type BlobClientBreakLeaseOptions struct { - // For a break operation, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. This - // break period is only used if it is shorter than the time remaining on the - // lease. If longer, the time remaining on the lease is used. 
A new lease will not be available before the break period has - // expired, but the lease may be held for longer than the break period. If this - // header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, - // and an infinite lease breaks immediately. - BreakPeriod *int32 - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientChangeLeaseOptions contains the optional parameters for the BlobClient.ChangeLease method. -type BlobClientChangeLeaseOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientCopyFromURLOptions contains the optional parameters for the BlobClient.CopyFromURL method. -type BlobClientCopyFromURLOptions struct { - // Optional. Used to set blob tags in various blob operations. - BlobTagsString *string - // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. - CopySourceAuthorization *string - // Specifies the date time when the blobs immutability policy is set to expire. - ImmutabilityPolicyExpiry *time.Time - // Specifies the immutability policy mode to set on the blob. - ImmutabilityPolicyMode *ImmutabilityPolicySetting - // Specified if a legal hold should be set on the blob. - LegalHold *bool - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // Specify the md5 calculated for the range of bytes that must be read from the copy source. - SourceContentMD5 []byte - // Optional. Indicates the tier to be set on the blob. - Tier *AccessTier - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientCreateSnapshotOptions contains the optional parameters for the BlobClient.CreateSnapshot method. -type BlobClientCreateSnapshotOptions struct { - // Optional. 
Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientDeleteImmutabilityPolicyOptions contains the optional parameters for the BlobClient.DeleteImmutabilityPolicy -// method. -type BlobClientDeleteImmutabilityPolicyOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientDeleteOptions contains the optional parameters for the BlobClient.Delete method. -type BlobClientDeleteOptions struct { - // Required if the blob has associated snapshots. Specify one of the following two options: include: Delete the base blob - // and all of its snapshots. only: Delete only the blob's snapshots and not the blob - // itself - DeleteSnapshots *DeleteSnapshotsOptionType - // Optional. Only possible value is 'permanent', which specifies to permanently delete a blob if blob soft delete is enabled. - DeleteType *DeleteType - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more - // information on working with blob snapshots, see Creating a Snapshot of a Blob. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] - Snapshot *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. - // It's for service version 2019-10-10 and newer. - VersionID *string -} - -// BlobClientDownloadOptions contains the optional parameters for the BlobClient.Download method. -type BlobClientDownloadOptions struct { - // Return only the bytes of the blob in the specified range. 
- Range *string - // When set to true and specified together with the Range, the service returns the CRC64 hash for the range, as long as the - // range is less than or equal to 4 MB in size. - RangeGetContentCRC64 *bool - // When set to true and specified together with the Range, the service returns the MD5 hash for the range, as long as the - // range is less than or equal to 4 MB in size. - RangeGetContentMD5 *bool - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more - // information on working with blob snapshots, see Creating a Snapshot of a Blob. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] - Snapshot *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. - // It's for service version 2019-10-10 and newer. - VersionID *string -} - -// BlobClientGetAccountInfoOptions contains the optional parameters for the BlobClient.GetAccountInfo method. -type BlobClientGetAccountInfoOptions struct { - // placeholder for future optional parameters -} - -// BlobClientGetPropertiesOptions contains the optional parameters for the BlobClient.GetProperties method. -type BlobClientGetPropertiesOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more - // information on working with blob snapshots, see Creating a Snapshot of a Blob. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] - Snapshot *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. - // It's for service version 2019-10-10 and newer. - VersionID *string -} - -// BlobClientGetTagsOptions contains the optional parameters for the BlobClient.GetTags method. -type BlobClientGetTagsOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more - // information on working with blob snapshots, see Creating a Snapshot of a Blob. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] - Snapshot *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
- // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. - // It's for service version 2019-10-10 and newer. - VersionID *string -} - -// BlobClientQueryOptions contains the optional parameters for the BlobClient.Query method. -type BlobClientQueryOptions struct { - // the query request - QueryRequest *QueryRequest - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more - // information on working with blob snapshots, see Creating a Snapshot of a Blob. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] - Snapshot *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientReleaseLeaseOptions contains the optional parameters for the BlobClient.ReleaseLease method. -type BlobClientReleaseLeaseOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientRenewLeaseOptions contains the optional parameters for the BlobClient.RenewLease method. -type BlobClientRenewLeaseOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientSetExpiryOptions contains the optional parameters for the BlobClient.SetExpiry method. -type BlobClientSetExpiryOptions struct { - // The time to set the blob to expiry - ExpiresOn *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientSetHTTPHeadersOptions contains the optional parameters for the BlobClient.SetHTTPHeaders method. -type BlobClientSetHTTPHeadersOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. 
- RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientSetImmutabilityPolicyOptions contains the optional parameters for the BlobClient.SetImmutabilityPolicy method. -type BlobClientSetImmutabilityPolicyOptions struct { - // Specifies the date time when the blobs immutability policy is set to expire. - ImmutabilityPolicyExpiry *time.Time - // Specifies the immutability policy mode to set on the blob. - ImmutabilityPolicyMode *ImmutabilityPolicySetting - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientSetLegalHoldOptions contains the optional parameters for the BlobClient.SetLegalHold method. -type BlobClientSetLegalHoldOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientSetMetadataOptions contains the optional parameters for the BlobClient.SetMetadata method. -type BlobClientSetMetadataOptions struct { - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientSetTagsOptions contains the optional parameters for the BlobClient.SetTags method. -type BlobClientSetTagsOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
- // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // Specify the transactional crc64 for the body, to be validated by the service. - TransactionalContentCRC64 []byte - // Specify the transactional md5 for the body, to be validated by the service. - TransactionalContentMD5 []byte - // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. - // It's for service version 2019-10-10 and newer. - VersionID *string -} - -// BlobClientSetTierOptions contains the optional parameters for the BlobClient.SetTier method. -type BlobClientSetTierOptions struct { - // Optional: Indicates the priority with which to rehydrate an archived blob. - RehydratePriority *RehydratePriority - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more - // information on working with blob snapshots, see Creating a Snapshot of a Blob. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] - Snapshot *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. - // It's for service version 2019-10-10 and newer. - VersionID *string -} - -// BlobClientStartCopyFromURLOptions contains the optional parameters for the BlobClient.StartCopyFromURL method. -type BlobClientStartCopyFromURLOptions struct { - // Optional. Used to set blob tags in various blob operations. - BlobTagsString *string - // Specifies the date time when the blobs immutability policy is set to expire. - ImmutabilityPolicyExpiry *time.Time - // Specifies the immutability policy mode to set on the blob. - ImmutabilityPolicyMode *ImmutabilityPolicySetting - // Specified if a legal hold should be set on the blob. - LegalHold *bool - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]string - // Optional: Indicates the priority with which to rehydrate an archived blob. - RehydratePriority *RehydratePriority - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // Overrides the sealed state of the destination blob. Service version 2019-12-12 and newer. - SealBlob *bool - // Optional. Indicates the tier to be set on the blob. - Tier *AccessTier - // The timeout parameter is expressed in seconds. 
For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientUndeleteOptions contains the optional parameters for the BlobClient.Undelete method. -type BlobClientUndeleteOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - type BlobFlatListSegment struct { // REQUIRED - BlobItems []*BlobItemInternal `xml:"Blob"` -} - -// BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. -type BlobHTTPHeaders struct { - // Optional. Sets the blob's cache control. If specified, this property is stored with the blob and returned with a read request. - BlobCacheControl *string - // Optional. Sets the blob's Content-Disposition header. - BlobContentDisposition *string - // Optional. Sets the blob's content encoding. If specified, this property is stored with the blob and returned with a read - // request. - BlobContentEncoding *string - // Optional. Set the blob's content language. If specified, this property is stored with the blob and returned with a read - // request. - BlobContentLanguage *string - // Optional. An MD5 hash of the blob content. Note that this hash is not validated, as the hashes for the individual blocks - // were validated when each was uploaded. - BlobContentMD5 []byte - // Optional. Sets the blob's content type. If specified, this property is stored with the blob and returned with a read request. - BlobContentType *string + BlobItems []*BlobItem `xml:"Blob"` } type BlobHierarchyListSegment struct { // REQUIRED - BlobItems []*BlobItemInternal `xml:"Blob"` - BlobPrefixes []*BlobPrefix `xml:"BlobPrefix"` + BlobItems []*BlobItem `xml:"Blob"` + BlobPrefixes []*BlobPrefix `xml:"BlobPrefix"` } -// BlobItemInternal - An Azure Storage blob -type BlobItemInternal struct { +// BlobItem - An Azure Storage blob +type BlobItem struct { // REQUIRED Deleted *bool `xml:"Deleted"` @@ -540,7 +60,7 @@ type BlobItemInternal struct { Name *string `xml:"Name"` // REQUIRED; Properties of a blob - Properties *BlobPropertiesInternal `xml:"Properties"` + Properties *BlobProperties `xml:"Properties"` // REQUIRED Snapshot *string `xml:"Snapshot"` @@ -558,13 +78,21 @@ type BlobItemInternal struct { VersionID *string `xml:"VersionId"` } +type BlobName struct { + // The name of the blob. + Content *string `xml:",chardata"` + + // Indicates if the blob name is encoded. + Encoded *bool `xml:"Encoded,attr"` +} + type BlobPrefix struct { // REQUIRED Name *string `xml:"Name"` } -// BlobPropertiesInternal - Properties of a blob -type BlobPropertiesInternal struct { +// BlobProperties - Properties of a blob +type BlobProperties struct { // REQUIRED ETag *azcore.ETag `xml:"Etag"` @@ -640,141 +168,6 @@ type Block struct { Size *int64 `xml:"Size"` } -// BlockBlobClientCommitBlockListOptions contains the optional parameters for the BlockBlobClient.CommitBlockList method. -type BlockBlobClientCommitBlockListOptions struct { - // Optional. Used to set blob tags in various blob operations. 
- BlobTagsString *string - // Specifies the date time when the blobs immutability policy is set to expire. - ImmutabilityPolicyExpiry *time.Time - // Specifies the immutability policy mode to set on the blob. - ImmutabilityPolicyMode *ImmutabilityPolicySetting - // Specified if a legal hold should be set on the blob. - LegalHold *bool - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // Optional. Indicates the tier to be set on the blob. - Tier *AccessTier - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // Specify the transactional crc64 for the body, to be validated by the service. - TransactionalContentCRC64 []byte - // Specify the transactional md5 for the body, to be validated by the service. - TransactionalContentMD5 []byte -} - -// BlockBlobClientGetBlockListOptions contains the optional parameters for the BlockBlobClient.GetBlockList method. -type BlockBlobClientGetBlockListOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more - // information on working with blob snapshots, see Creating a Snapshot of a Blob. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] - Snapshot *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlockBlobClientPutBlobFromURLOptions contains the optional parameters for the BlockBlobClient.PutBlobFromURL method. -type BlockBlobClientPutBlobFromURLOptions struct { - // Optional. Used to set blob tags in various blob operations. - BlobTagsString *string - // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. - CopySourceAuthorization *string - // Optional, default is true. Indicates if properties from the source blob should be copied. - CopySourceBlobProperties *bool - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. 
If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // Specify the md5 calculated for the range of bytes that must be read from the copy source. - SourceContentMD5 []byte - // Optional. Indicates the tier to be set on the blob. - Tier *AccessTier - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // Specify the transactional md5 for the body, to be validated by the service. - TransactionalContentMD5 []byte -} - -// BlockBlobClientStageBlockFromURLOptions contains the optional parameters for the BlockBlobClient.StageBlockFromURL method. -type BlockBlobClientStageBlockFromURLOptions struct { - // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. - CopySourceAuthorization *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // Specify the md5 calculated for the range of bytes that must be read from the copy source. - SourceContentMD5 []byte - // Specify the crc64 calculated for the range of bytes that must be read from the copy source. - SourceContentcrc64 []byte - // Bytes of source data in the specified range. - SourceRange *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlockBlobClientStageBlockOptions contains the optional parameters for the BlockBlobClient.StageBlock method. -type BlockBlobClientStageBlockOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // Specify the transactional crc64 for the body, to be validated by the service. - TransactionalContentCRC64 []byte - // Specify the transactional md5 for the body, to be validated by the service. - TransactionalContentMD5 []byte -} - -// BlockBlobClientUploadOptions contains the optional parameters for the BlockBlobClient.Upload method. -type BlockBlobClientUploadOptions struct { - // Optional. Used to set blob tags in various blob operations. - BlobTagsString *string - // Specifies the date time when the blobs immutability policy is set to expire. - ImmutabilityPolicyExpiry *time.Time - // Specifies the immutability policy mode to set on the blob. 
- ImmutabilityPolicyMode *ImmutabilityPolicySetting - // Specified if a legal hold should be set on the blob. - LegalHold *bool - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // Optional. Indicates the tier to be set on the blob. - Tier *AccessTier - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // Specify the transactional md5 for the body, to be validated by the service. - TransactionalContentMD5 []byte -} - type BlockList struct { CommittedBlocks []*Block `xml:"CommittedBlocks>Block"` UncommittedBlocks []*Block `xml:"UncommittedBlocks>Block"` @@ -794,254 +187,6 @@ type ClearRange struct { Start *int64 `xml:"Start"` } -// ContainerClientAcquireLeaseOptions contains the optional parameters for the ContainerClient.AcquireLease method. -type ContainerClientAcquireLeaseOptions struct { - // Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite lease - // can be between 15 and 60 seconds. A lease duration cannot be changed using - // renew or change. - Duration *int32 - // Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is - // not in the correct format. See Guid Constructor (String) for a list of valid GUID - // string formats. - ProposedLeaseID *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientBreakLeaseOptions contains the optional parameters for the ContainerClient.BreakLease method. -type ContainerClientBreakLeaseOptions struct { - // For a break operation, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. This - // break period is only used if it is shorter than the time remaining on the - // lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has - // expired, but the lease may be held for longer than the break period. If this - // header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, - // and an infinite lease breaks immediately. 
- BreakPeriod *int32 - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientChangeLeaseOptions contains the optional parameters for the ContainerClient.ChangeLease method. -type ContainerClientChangeLeaseOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientCreateOptions contains the optional parameters for the ContainerClient.Create method. -type ContainerClientCreateOptions struct { - // Specifies whether data in the container may be accessed publicly and the level of access - Access *PublicAccessType - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientDeleteOptions contains the optional parameters for the ContainerClient.Delete method. -type ContainerClientDeleteOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientGetAccessPolicyOptions contains the optional parameters for the ContainerClient.GetAccessPolicy method. -type ContainerClientGetAccessPolicyOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
- // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientGetAccountInfoOptions contains the optional parameters for the ContainerClient.GetAccountInfo method. -type ContainerClientGetAccountInfoOptions struct { - // placeholder for future optional parameters -} - -// ContainerClientGetPropertiesOptions contains the optional parameters for the ContainerClient.GetProperties method. -type ContainerClientGetPropertiesOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientListBlobFlatSegmentOptions contains the optional parameters for the ContainerClient.ListBlobFlatSegment -// method. -type ContainerClientListBlobFlatSegmentOptions struct { - // Include this parameter to specify one or more datasets to include in the response. - Include []ListBlobsIncludeItem - // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The - // operation returns the NextMarker value within the response body if the listing - // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used - // as the value for the marker parameter in a subsequent call to request the next - // page of list items. The marker value is opaque to the client. - Marker *string - // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value - // greater than 5000, the server will return up to 5000 items. Note that if the - // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder - // of the results. For this reason, it is possible that the service will - // return fewer results than specified by maxresults, or than the default of 5000. - Maxresults *int32 - // Filters the results to return only containers whose name begins with the specified prefix. - Prefix *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientListBlobHierarchySegmentOptions contains the optional parameters for the ContainerClient.ListBlobHierarchySegment -// method. -type ContainerClientListBlobHierarchySegmentOptions struct { - // Include this parameter to specify one or more datasets to include in the response. - Include []ListBlobsIncludeItem - // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The - // operation returns the NextMarker value within the response body if the listing - // operation did not return all containers remaining to be listed with the current page. 
The NextMarker value can be used - // as the value for the marker parameter in a subsequent call to request the next - // page of list items. The marker value is opaque to the client. - Marker *string - // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value - // greater than 5000, the server will return up to 5000 items. Note that if the - // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder - // of the results. For this reason, it is possible that the service will - // return fewer results than specified by maxresults, or than the default of 5000. - Maxresults *int32 - // Filters the results to return only containers whose name begins with the specified prefix. - Prefix *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientReleaseLeaseOptions contains the optional parameters for the ContainerClient.ReleaseLease method. -type ContainerClientReleaseLeaseOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientRenameOptions contains the optional parameters for the ContainerClient.Rename method. -type ContainerClientRenameOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // A lease ID for the source path. If specified, the source path must have an active lease and the lease ID must match. - SourceLeaseID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientRenewLeaseOptions contains the optional parameters for the ContainerClient.RenewLease method. -type ContainerClientRenewLeaseOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientRestoreOptions contains the optional parameters for the ContainerClient.Restore method. -type ContainerClientRestoreOptions struct { - // Optional. Version 2019-12-12 and later. Specifies the name of the deleted container to restore. 
- DeletedContainerName *string - // Optional. Version 2019-12-12 and later. Specifies the version of the deleted container to restore. - DeletedContainerVersion *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientSetAccessPolicyOptions contains the optional parameters for the ContainerClient.SetAccessPolicy method. -type ContainerClientSetAccessPolicyOptions struct { - // Specifies whether data in the container may be accessed publicly and the level of access - Access *PublicAccessType - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientSetMetadataOptions contains the optional parameters for the ContainerClient.SetMetadata method. -type ContainerClientSetMetadataOptions struct { - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientSubmitBatchOptions contains the optional parameters for the ContainerClient.SubmitBatch method. -type ContainerClientSubmitBatchOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerCpkScopeInfo contains a group of parameters for the ContainerClient.Create method. -type ContainerCpkScopeInfo struct { - // Optional. Version 2019-07-07 and later. Specifies the default encryption scope to set on the container and use for all - // future writes. - DefaultEncryptionScope *string - // Optional. 
Version 2019-07-07 and newer. If true, prevents any request from specifying a different encryption scope than - // the scope set on the container. - PreventEncryptionScopeOverride *bool -} - // ContainerItem - An Azure Storage container type ContainerItem struct { // REQUIRED @@ -1078,11 +223,11 @@ type ContainerProperties struct { RemainingRetentionDays *int32 `xml:"RemainingRetentionDays"` } -// CorsRule - CORS is an HTTP feature that enables a web application running under one domain to access resources in another +// CORSRule - CORS is an HTTP feature that enables a web application running under one domain to access resources in another // domain. Web browsers implement a security restriction known as same-origin policy that // prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain (the origin // domain) to call APIs in another domain -type CorsRule struct { +type CORSRule struct { // REQUIRED; the request headers that the origin domain may specify on the CORS request. AllowedHeaders *string `xml:"AllowedHeaders"` @@ -1103,27 +248,6 @@ type CorsRule struct { MaxAgeInSeconds *int32 `xml:"MaxAgeInSeconds"` } -// CpkInfo contains a group of parameters for the BlobClient.Download method. -type CpkInfo struct { - // The algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided - // if the x-ms-encryption-key header is provided. - EncryptionAlgorithm *EncryptionAlgorithmType - // Optional. Specifies the encryption key to use to encrypt the data provided in the request. If not specified, encryption - // is performed with the root account encryption key. For more information, see - // Encryption at Rest for Azure Storage Services. - EncryptionKey *string - // The SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key header is provided. - EncryptionKeySHA256 *string -} - -// CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. -type CpkScopeInfo struct { - // Optional. Version 2019-07-07 and later. Specifies the name of the encryption scope to use to encrypt the data provided - // in the request. If not specified, encryption is performed with the default - // account encryption scope. For more information, see Encryption at Rest for Azure Storage Services. - EncryptionScope *string -} - // DelimitedTextConfiguration - Groups the settings used for interpreting the blob data if the blob is delimited text formatted. type DelimitedTextConfiguration struct { // The string used to separate columns. @@ -1148,10 +272,12 @@ type FilterBlobItem struct { ContainerName *string `xml:"ContainerName"` // REQUIRED - Name *string `xml:"Name"` + Name *string `xml:"Name"` + IsCurrentVersion *bool `xml:"IsCurrentVersion"` // Blob tags - Tags *BlobTags `xml:"Tags"` + Tags *BlobTags `xml:"Tags"` + VersionID *string `xml:"VersionId"` } // FilterBlobSegment - The result of a Filter Blobs API call @@ -1193,12 +319,6 @@ type KeyInfo struct { Start *string `xml:"Start"` } -// LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -type LeaseAccessConditions struct { - // If specified, the operation only succeeds if the resource's lease is active and matches this ID. 
- LeaseID *string -} - // ListBlobsFlatSegmentResponse - An enumeration of blobs type ListBlobsFlatSegmentResponse struct { // REQUIRED @@ -1278,194 +398,6 @@ type Metrics struct { Version *string `xml:"Version"` } -// ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -type ModifiedAccessConditions struct { - // Specify an ETag value to operate only on blobs with a matching value. - IfMatch *azcore.ETag - // Specify this header value to operate only on a blob if it has been modified since the specified date/time. - IfModifiedSince *time.Time - // Specify an ETag value to operate only on blobs without a matching value. - IfNoneMatch *azcore.ETag - // Specify a SQL where clause on blob tags to operate only on blobs with a matching value. - IfTags *string - // Specify this header value to operate only on a blob if it has not been modified since the specified date/time. - IfUnmodifiedSince *time.Time -} - -// PageBlobClientClearPagesOptions contains the optional parameters for the PageBlobClient.ClearPages method. -type PageBlobClientClearPagesOptions struct { - // Return only the bytes of the blob in the specified range. - Range *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// PageBlobClientCopyIncrementalOptions contains the optional parameters for the PageBlobClient.CopyIncremental method. -type PageBlobClientCopyIncrementalOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// PageBlobClientCreateOptions contains the optional parameters for the PageBlobClient.Create method. -type PageBlobClientCreateOptions struct { - // Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of - // the sequence number must be between 0 and 2^63 - 1. - BlobSequenceNumber *int64 - // Optional. Used to set blob tags in various blob operations. - BlobTagsString *string - // Specifies the date time when the blobs immutability policy is set to expire. - ImmutabilityPolicyExpiry *time.Time - // Specifies the immutability policy mode to set on the blob. - ImmutabilityPolicyMode *ImmutabilityPolicySetting - // Specified if a legal hold should be set on the blob. - LegalHold *bool - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. 
See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // Optional. Indicates the tier to be set on the page blob. - Tier *PremiumPageBlobAccessTier - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// PageBlobClientGetPageRangesDiffOptions contains the optional parameters for the PageBlobClient.GetPageRangesDiff method. -type PageBlobClientGetPageRangesDiffOptions struct { - // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The - // operation returns the NextMarker value within the response body if the listing - // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used - // as the value for the marker parameter in a subsequent call to request the next - // page of list items. The marker value is opaque to the client. - Marker *string - // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value - // greater than 5000, the server will return up to 5000 items. Note that if the - // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder - // of the results. For this reason, it is possible that the service will - // return fewer results than specified by maxresults, or than the default of 5000. - Maxresults *int32 - // Optional. This header is only supported in service versions 2019-04-19 and after and specifies the URL of a previous snapshot - // of the target blob. The response will only contain pages that were changed - // between the target blob and its previous snapshot. - PrevSnapshotURL *string - // Optional in version 2015-07-08 and newer. The prevsnapshot parameter is a DateTime value that specifies that the response - // will contain only pages that were changed between target blob and previous - // snapshot. Changed pages include both updated and cleared pages. The target blob may be a snapshot, as long as the snapshot - // specified by prevsnapshot is the older of the two. Note that incremental - // snapshots are currently supported only for blobs created on or after January 1, 2016. - Prevsnapshot *string - // Return only the bytes of the blob in the specified range. - Range *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more - // information on working with blob snapshots, see Creating a Snapshot of a Blob. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] - Snapshot *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
- // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// PageBlobClientGetPageRangesOptions contains the optional parameters for the PageBlobClient.GetPageRanges method. -type PageBlobClientGetPageRangesOptions struct { - // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The - // operation returns the NextMarker value within the response body if the listing - // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used - // as the value for the marker parameter in a subsequent call to request the next - // page of list items. The marker value is opaque to the client. - Marker *string - // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value - // greater than 5000, the server will return up to 5000 items. Note that if the - // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder - // of the results. For this reason, it is possible that the service will - // return fewer results than specified by maxresults, or than the default of 5000. - Maxresults *int32 - // Return only the bytes of the blob in the specified range. - Range *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more - // information on working with blob snapshots, see Creating a Snapshot of a Blob. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] - Snapshot *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// PageBlobClientResizeOptions contains the optional parameters for the PageBlobClient.Resize method. -type PageBlobClientResizeOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// PageBlobClientUpdateSequenceNumberOptions contains the optional parameters for the PageBlobClient.UpdateSequenceNumber -// method. -type PageBlobClientUpdateSequenceNumberOptions struct { - // Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of - // the sequence number must be between 0 and 2^63 - 1. - BlobSequenceNumber *int64 - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
- // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// PageBlobClientUploadPagesFromURLOptions contains the optional parameters for the PageBlobClient.UploadPagesFromURL method. -type PageBlobClientUploadPagesFromURLOptions struct { - // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. - CopySourceAuthorization *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // Specify the md5 calculated for the range of bytes that must be read from the copy source. - SourceContentMD5 []byte - // Specify the crc64 calculated for the range of bytes that must be read from the copy source. - SourceContentcrc64 []byte - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// PageBlobClientUploadPagesOptions contains the optional parameters for the PageBlobClient.UploadPages method. -type PageBlobClientUploadPagesOptions struct { - // Return only the bytes of the blob in the specified range. - Range *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // Specify the transactional crc64 for the body, to be validated by the service. - TransactionalContentCRC64 []byte - // Specify the transactional md5 for the body, to be validated by the service. - TransactionalContentMD5 []byte -} - // PageList - the list of pages type PageList struct { ClearRange []*ClearRange `xml:"ClearRange"` @@ -1495,7 +427,7 @@ type QueryFormat struct { JSONTextConfiguration *JSONTextConfiguration `xml:"JsonTextConfiguration"` // parquet configuration - ParquetTextConfiguration interface{} `xml:"ParquetTextConfiguration"` + ParquetTextConfiguration any `xml:"ParquetTextConfiguration"` } // QueryRequest - Groups the set of query request settings. @@ -1528,122 +460,6 @@ type RetentionPolicy struct { Days *int32 `xml:"Days"` } -// SequenceNumberAccessConditions contains a group of parameters for the PageBlobClient.UploadPages method. -type SequenceNumberAccessConditions struct { - // Specify this header value to operate only on a blob if it has the specified sequence number. - IfSequenceNumberEqualTo *int64 - // Specify this header value to operate only on a blob if it has a sequence number less than the specified. - IfSequenceNumberLessThan *int64 - // Specify this header value to operate only on a blob if it has a sequence number less than or equal to the specified. - IfSequenceNumberLessThanOrEqualTo *int64 -} - -// ServiceClientFilterBlobsOptions contains the optional parameters for the ServiceClient.FilterBlobs method. -type ServiceClientFilterBlobsOptions struct { - // A string value that identifies the portion of the list of containers to be returned with the next listing operation. 
The - // operation returns the NextMarker value within the response body if the listing - // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used - // as the value for the marker parameter in a subsequent call to request the next - // page of list items. The marker value is opaque to the client. - Marker *string - // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value - // greater than 5000, the server will return up to 5000 items. Note that if the - // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder - // of the results. For this reason, it is possible that the service will - // return fewer results than specified by maxresults, or than the default of 5000. - Maxresults *int32 - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // Filters the results to return only to return only blobs whose tags match the specified expression. - Where *string -} - -// ServiceClientGetAccountInfoOptions contains the optional parameters for the ServiceClient.GetAccountInfo method. -type ServiceClientGetAccountInfoOptions struct { - // placeholder for future optional parameters -} - -// ServiceClientGetPropertiesOptions contains the optional parameters for the ServiceClient.GetProperties method. -type ServiceClientGetPropertiesOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ServiceClientGetStatisticsOptions contains the optional parameters for the ServiceClient.GetStatistics method. -type ServiceClientGetStatisticsOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ServiceClientGetUserDelegationKeyOptions contains the optional parameters for the ServiceClient.GetUserDelegationKey method. -type ServiceClientGetUserDelegationKeyOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
- // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ServiceClientListContainersSegmentOptions contains the optional parameters for the ServiceClient.ListContainersSegment -// method. -type ServiceClientListContainersSegmentOptions struct { - // Include this parameter to specify that the container's metadata be returned as part of the response body. - Include []ListContainersIncludeType - // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The - // operation returns the NextMarker value within the response body if the listing - // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used - // as the value for the marker parameter in a subsequent call to request the next - // page of list items. The marker value is opaque to the client. - Marker *string - // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value - // greater than 5000, the server will return up to 5000 items. Note that if the - // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder - // of the results. For this reason, it is possible that the service will - // return fewer results than specified by maxresults, or than the default of 5000. - Maxresults *int32 - // Filters the results to return only containers whose name begins with the specified prefix. - Prefix *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ServiceClientSetPropertiesOptions contains the optional parameters for the ServiceClient.SetProperties method. -type ServiceClientSetPropertiesOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ServiceClientSubmitBatchOptions contains the optional parameters for the ServiceClient.SubmitBatch method. -type ServiceClientSubmitBatchOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
- // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - // SignedIdentifier - signed identifier type SignedIdentifier struct { // REQUIRED; An Access policy @@ -1653,20 +469,6 @@ type SignedIdentifier struct { ID *string `xml:"Id"` } -// SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL method. -type SourceModifiedAccessConditions struct { - // Specify an ETag value to operate only on blobs with a matching value. - SourceIfMatch *azcore.ETag - // Specify this header value to operate only on a blob if it has been modified since the specified date/time. - SourceIfModifiedSince *time.Time - // Specify an ETag value to operate only on blobs without a matching value. - SourceIfNoneMatch *azcore.ETag - // Specify a SQL where clause on blob tags to operate only on blobs with a matching value. - SourceIfTags *string - // Specify this header value to operate only on a blob if it has not been modified since the specified date/time. - SourceIfUnmodifiedSince *time.Time -} - // StaticWebsite - The properties that enable an account to host a static website type StaticWebsite struct { // REQUIRED; Indicates whether this account is hosting a static website @@ -1683,13 +485,13 @@ type StaticWebsite struct { } type StorageError struct { - Message *string `json:"Message,omitempty"` + Message *string } // StorageServiceProperties - Storage Service Properties. type StorageServiceProperties struct { // The set of CORS rules. - Cors []*CorsRule `xml:"Cors>CorsRule"` + CORS []*CORSRule `xml:"Cors>CorsRule"` // The default version to use for requests to the Blob service if an incoming request's version is not specified. Possible // values include version 2008-10-27 and all more recent versions diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go index 770e41a080bd7..7e094db8730dc 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go @@ -3,9 +3,8 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. // Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. package generated @@ -20,31 +19,31 @@ import ( ) // MarshalXML implements the xml.Marshaller interface for type AccessPolicy. -func (a AccessPolicy) MarshalXML(e *xml.Encoder, start xml.StartElement) error { +func (a AccessPolicy) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { type alias AccessPolicy aux := &struct { *alias - Expiry *timeRFC3339 `xml:"Expiry"` - Start *timeRFC3339 `xml:"Start"` + Expiry *dateTimeRFC3339 `xml:"Expiry"` + Start *dateTimeRFC3339 `xml:"Start"` }{ alias: (*alias)(&a), - Expiry: (*timeRFC3339)(a.Expiry), - Start: (*timeRFC3339)(a.Start), + Expiry: (*dateTimeRFC3339)(a.Expiry), + Start: (*dateTimeRFC3339)(a.Start), } - return e.EncodeElement(aux, start) + return enc.EncodeElement(aux, start) } // UnmarshalXML implements the xml.Unmarshaller interface for type AccessPolicy. 
-func (a *AccessPolicy) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { +func (a *AccessPolicy) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { type alias AccessPolicy aux := &struct { *alias - Expiry *timeRFC3339 `xml:"Expiry"` - Start *timeRFC3339 `xml:"Start"` + Expiry *dateTimeRFC3339 `xml:"Expiry"` + Start *dateTimeRFC3339 `xml:"Start"` }{ alias: (*alias)(a), } - if err := d.DecodeElement(aux, &start); err != nil { + if err := dec.DecodeElement(aux, &start); err != nil { return err } a.Expiry = (*time.Time)(aux.Expiry) @@ -53,7 +52,7 @@ func (a *AccessPolicy) UnmarshalXML(d *xml.Decoder, start xml.StartElement) erro } // MarshalXML implements the xml.Marshaller interface for type ArrowConfiguration. -func (a ArrowConfiguration) MarshalXML(e *xml.Encoder, start xml.StartElement) error { +func (a ArrowConfiguration) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { type alias ArrowConfiguration aux := &struct { *alias @@ -64,31 +63,31 @@ func (a ArrowConfiguration) MarshalXML(e *xml.Encoder, start xml.StartElement) e if a.Schema != nil { aux.Schema = &a.Schema } - return e.EncodeElement(aux, start) + return enc.EncodeElement(aux, start) } // MarshalXML implements the xml.Marshaller interface for type BlobFlatListSegment. -func (b BlobFlatListSegment) MarshalXML(e *xml.Encoder, start xml.StartElement) error { +func (b BlobFlatListSegment) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { type alias BlobFlatListSegment aux := &struct { *alias - BlobItems *[]*BlobItemInternal `xml:"Blob"` + BlobItems *[]*BlobItem `xml:"Blob"` }{ alias: (*alias)(&b), } if b.BlobItems != nil { aux.BlobItems = &b.BlobItems } - return e.EncodeElement(aux, start) + return enc.EncodeElement(aux, start) } // MarshalXML implements the xml.Marshaller interface for type BlobHierarchyListSegment. -func (b BlobHierarchyListSegment) MarshalXML(e *xml.Encoder, start xml.StartElement) error { +func (b BlobHierarchyListSegment) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { type alias BlobHierarchyListSegment aux := &struct { *alias - BlobItems *[]*BlobItemInternal `xml:"Blob"` - BlobPrefixes *[]*BlobPrefix `xml:"BlobPrefix"` + BlobItems *[]*BlobItem `xml:"Blob"` + BlobPrefixes *[]*BlobPrefix `xml:"BlobPrefix"` }{ alias: (*alias)(&b), } @@ -98,77 +97,59 @@ func (b BlobHierarchyListSegment) MarshalXML(e *xml.Encoder, start xml.StartElem if b.BlobPrefixes != nil { aux.BlobPrefixes = &b.BlobPrefixes } - return e.EncodeElement(aux, start) + return enc.EncodeElement(aux, start) } -// UnmarshalXML implements the xml.Unmarshaller interface for type BlobItemInternal. -func (b *BlobItemInternal) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - type alias BlobItemInternal +// MarshalXML implements the xml.Marshaller interface for type BlobProperties. +func (b BlobProperties) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias BlobProperties aux := &struct { *alias - Metadata additionalProperties `xml:"Metadata"` - OrMetadata additionalProperties `xml:"OrMetadata"` - }{ - alias: (*alias)(b), - } - if err := d.DecodeElement(aux, &start); err != nil { - return err - } - b.Metadata = (map[string]*string)(aux.Metadata) - b.OrMetadata = (map[string]*string)(aux.OrMetadata) - return nil -} - -// MarshalXML implements the xml.Marshaller interface for type BlobPropertiesInternal. 
-func (b BlobPropertiesInternal) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - type alias BlobPropertiesInternal - aux := &struct { - *alias - AccessTierChangeTime *timeRFC1123 `xml:"AccessTierChangeTime"` - ContentMD5 *string `xml:"Content-MD5"` - CopyCompletionTime *timeRFC1123 `xml:"CopyCompletionTime"` - CreationTime *timeRFC1123 `xml:"Creation-Time"` - DeletedTime *timeRFC1123 `xml:"DeletedTime"` - ExpiresOn *timeRFC1123 `xml:"Expiry-Time"` - ImmutabilityPolicyExpiresOn *timeRFC1123 `xml:"ImmutabilityPolicyUntilDate"` - LastAccessedOn *timeRFC1123 `xml:"LastAccessTime"` - LastModified *timeRFC1123 `xml:"Last-Modified"` + AccessTierChangeTime *dateTimeRFC1123 `xml:"AccessTierChangeTime"` + ContentMD5 *string `xml:"Content-MD5"` + CopyCompletionTime *dateTimeRFC1123 `xml:"CopyCompletionTime"` + CreationTime *dateTimeRFC1123 `xml:"Creation-Time"` + DeletedTime *dateTimeRFC1123 `xml:"DeletedTime"` + ExpiresOn *dateTimeRFC1123 `xml:"Expiry-Time"` + ImmutabilityPolicyExpiresOn *dateTimeRFC1123 `xml:"ImmutabilityPolicyUntilDate"` + LastAccessedOn *dateTimeRFC1123 `xml:"LastAccessTime"` + LastModified *dateTimeRFC1123 `xml:"Last-Modified"` }{ alias: (*alias)(&b), - AccessTierChangeTime: (*timeRFC1123)(b.AccessTierChangeTime), - CopyCompletionTime: (*timeRFC1123)(b.CopyCompletionTime), - CreationTime: (*timeRFC1123)(b.CreationTime), - DeletedTime: (*timeRFC1123)(b.DeletedTime), - ExpiresOn: (*timeRFC1123)(b.ExpiresOn), - ImmutabilityPolicyExpiresOn: (*timeRFC1123)(b.ImmutabilityPolicyExpiresOn), - LastAccessedOn: (*timeRFC1123)(b.LastAccessedOn), - LastModified: (*timeRFC1123)(b.LastModified), + AccessTierChangeTime: (*dateTimeRFC1123)(b.AccessTierChangeTime), + CopyCompletionTime: (*dateTimeRFC1123)(b.CopyCompletionTime), + CreationTime: (*dateTimeRFC1123)(b.CreationTime), + DeletedTime: (*dateTimeRFC1123)(b.DeletedTime), + ExpiresOn: (*dateTimeRFC1123)(b.ExpiresOn), + ImmutabilityPolicyExpiresOn: (*dateTimeRFC1123)(b.ImmutabilityPolicyExpiresOn), + LastAccessedOn: (*dateTimeRFC1123)(b.LastAccessedOn), + LastModified: (*dateTimeRFC1123)(b.LastModified), } if b.ContentMD5 != nil { encodedContentMD5 := runtime.EncodeByteArray(b.ContentMD5, runtime.Base64StdFormat) aux.ContentMD5 = &encodedContentMD5 } - return e.EncodeElement(aux, start) + return enc.EncodeElement(aux, start) } -// UnmarshalXML implements the xml.Unmarshaller interface for type BlobPropertiesInternal. -func (b *BlobPropertiesInternal) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - type alias BlobPropertiesInternal +// UnmarshalXML implements the xml.Unmarshaller interface for type BlobProperties. 
+func (b *BlobProperties) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { + type alias BlobProperties aux := &struct { *alias - AccessTierChangeTime *timeRFC1123 `xml:"AccessTierChangeTime"` - ContentMD5 *string `xml:"Content-MD5"` - CopyCompletionTime *timeRFC1123 `xml:"CopyCompletionTime"` - CreationTime *timeRFC1123 `xml:"Creation-Time"` - DeletedTime *timeRFC1123 `xml:"DeletedTime"` - ExpiresOn *timeRFC1123 `xml:"Expiry-Time"` - ImmutabilityPolicyExpiresOn *timeRFC1123 `xml:"ImmutabilityPolicyUntilDate"` - LastAccessedOn *timeRFC1123 `xml:"LastAccessTime"` - LastModified *timeRFC1123 `xml:"Last-Modified"` + AccessTierChangeTime *dateTimeRFC1123 `xml:"AccessTierChangeTime"` + ContentMD5 *string `xml:"Content-MD5"` + CopyCompletionTime *dateTimeRFC1123 `xml:"CopyCompletionTime"` + CreationTime *dateTimeRFC1123 `xml:"Creation-Time"` + DeletedTime *dateTimeRFC1123 `xml:"DeletedTime"` + ExpiresOn *dateTimeRFC1123 `xml:"Expiry-Time"` + ImmutabilityPolicyExpiresOn *dateTimeRFC1123 `xml:"ImmutabilityPolicyUntilDate"` + LastAccessedOn *dateTimeRFC1123 `xml:"LastAccessTime"` + LastModified *dateTimeRFC1123 `xml:"Last-Modified"` }{ alias: (*alias)(b), } - if err := d.DecodeElement(aux, &start); err != nil { + if err := dec.DecodeElement(aux, &start); err != nil { return err } b.AccessTierChangeTime = (*time.Time)(aux.AccessTierChangeTime) @@ -188,7 +169,7 @@ func (b *BlobPropertiesInternal) UnmarshalXML(d *xml.Decoder, start xml.StartEle } // MarshalXML implements the xml.Marshaller interface for type BlobTags. -func (b BlobTags) MarshalXML(e *xml.Encoder, start xml.StartElement) error { +func (b BlobTags) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { start.Name.Local = "Tags" type alias BlobTags aux := &struct { @@ -200,11 +181,11 @@ func (b BlobTags) MarshalXML(e *xml.Encoder, start xml.StartElement) error { if b.BlobTagSet != nil { aux.BlobTagSet = &b.BlobTagSet } - return e.EncodeElement(aux, start) + return enc.EncodeElement(aux, start) } // MarshalXML implements the xml.Marshaller interface for type BlockList. -func (b BlockList) MarshalXML(e *xml.Encoder, start xml.StartElement) error { +func (b BlockList) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { type alias BlockList aux := &struct { *alias @@ -219,11 +200,11 @@ func (b BlockList) MarshalXML(e *xml.Encoder, start xml.StartElement) error { if b.UncommittedBlocks != nil { aux.UncommittedBlocks = &b.UncommittedBlocks } - return e.EncodeElement(aux, start) + return enc.EncodeElement(aux, start) } // MarshalXML implements the xml.Marshaller interface for type BlockLookupList. -func (b BlockLookupList) MarshalXML(e *xml.Encoder, start xml.StartElement) error { +func (b BlockLookupList) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { start.Name.Local = "BlockList" type alias BlockLookupList aux := &struct { @@ -243,11 +224,11 @@ func (b BlockLookupList) MarshalXML(e *xml.Encoder, start xml.StartElement) erro if b.Uncommitted != nil { aux.Uncommitted = &b.Uncommitted } - return e.EncodeElement(aux, start) + return enc.EncodeElement(aux, start) } // UnmarshalXML implements the xml.Unmarshaller interface for type ContainerItem. 
-func (c *ContainerItem) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { +func (c *ContainerItem) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { type alias ContainerItem aux := &struct { *alias @@ -255,7 +236,7 @@ func (c *ContainerItem) UnmarshalXML(d *xml.Decoder, start xml.StartElement) err }{ alias: (*alias)(c), } - if err := d.DecodeElement(aux, &start); err != nil { + if err := dec.DecodeElement(aux, &start); err != nil { return err } c.Metadata = (map[string]*string)(aux.Metadata) @@ -263,31 +244,31 @@ func (c *ContainerItem) UnmarshalXML(d *xml.Decoder, start xml.StartElement) err } // MarshalXML implements the xml.Marshaller interface for type ContainerProperties. -func (c ContainerProperties) MarshalXML(e *xml.Encoder, start xml.StartElement) error { +func (c ContainerProperties) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { type alias ContainerProperties aux := &struct { *alias - DeletedTime *timeRFC1123 `xml:"DeletedTime"` - LastModified *timeRFC1123 `xml:"Last-Modified"` + DeletedTime *dateTimeRFC1123 `xml:"DeletedTime"` + LastModified *dateTimeRFC1123 `xml:"Last-Modified"` }{ alias: (*alias)(&c), - DeletedTime: (*timeRFC1123)(c.DeletedTime), - LastModified: (*timeRFC1123)(c.LastModified), + DeletedTime: (*dateTimeRFC1123)(c.DeletedTime), + LastModified: (*dateTimeRFC1123)(c.LastModified), } - return e.EncodeElement(aux, start) + return enc.EncodeElement(aux, start) } // UnmarshalXML implements the xml.Unmarshaller interface for type ContainerProperties. -func (c *ContainerProperties) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { +func (c *ContainerProperties) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { type alias ContainerProperties aux := &struct { *alias - DeletedTime *timeRFC1123 `xml:"DeletedTime"` - LastModified *timeRFC1123 `xml:"Last-Modified"` + DeletedTime *dateTimeRFC1123 `xml:"DeletedTime"` + LastModified *dateTimeRFC1123 `xml:"Last-Modified"` }{ alias: (*alias)(c), } - if err := d.DecodeElement(aux, &start); err != nil { + if err := dec.DecodeElement(aux, &start); err != nil { return err } c.DeletedTime = (*time.Time)(aux.DeletedTime) @@ -296,7 +277,7 @@ func (c *ContainerProperties) UnmarshalXML(d *xml.Decoder, start xml.StartElemen } // MarshalXML implements the xml.Marshaller interface for type FilterBlobSegment. -func (f FilterBlobSegment) MarshalXML(e *xml.Encoder, start xml.StartElement) error { +func (f FilterBlobSegment) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { type alias FilterBlobSegment aux := &struct { *alias @@ -307,32 +288,32 @@ func (f FilterBlobSegment) MarshalXML(e *xml.Encoder, start xml.StartElement) er if f.Blobs != nil { aux.Blobs = &f.Blobs } - return e.EncodeElement(aux, start) + return enc.EncodeElement(aux, start) } // MarshalXML implements the xml.Marshaller interface for type GeoReplication. -func (g GeoReplication) MarshalXML(e *xml.Encoder, start xml.StartElement) error { +func (g GeoReplication) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { type alias GeoReplication aux := &struct { *alias - LastSyncTime *timeRFC1123 `xml:"LastSyncTime"` + LastSyncTime *dateTimeRFC1123 `xml:"LastSyncTime"` }{ alias: (*alias)(&g), - LastSyncTime: (*timeRFC1123)(g.LastSyncTime), + LastSyncTime: (*dateTimeRFC1123)(g.LastSyncTime), } - return e.EncodeElement(aux, start) + return enc.EncodeElement(aux, start) } // UnmarshalXML implements the xml.Unmarshaller interface for type GeoReplication. 
-func (g *GeoReplication) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { +func (g *GeoReplication) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { type alias GeoReplication aux := &struct { *alias - LastSyncTime *timeRFC1123 `xml:"LastSyncTime"` + LastSyncTime *dateTimeRFC1123 `xml:"LastSyncTime"` }{ alias: (*alias)(g), } - if err := d.DecodeElement(aux, &start); err != nil { + if err := dec.DecodeElement(aux, &start); err != nil { return err } g.LastSyncTime = (*time.Time)(aux.LastSyncTime) @@ -340,7 +321,7 @@ func (g *GeoReplication) UnmarshalXML(d *xml.Decoder, start xml.StartElement) er } // MarshalXML implements the xml.Marshaller interface for type ListContainersSegmentResponse. -func (l ListContainersSegmentResponse) MarshalXML(e *xml.Encoder, start xml.StartElement) error { +func (l ListContainersSegmentResponse) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { type alias ListContainersSegmentResponse aux := &struct { *alias @@ -351,11 +332,11 @@ func (l ListContainersSegmentResponse) MarshalXML(e *xml.Encoder, start xml.Star if l.ContainerItems != nil { aux.ContainerItems = &l.ContainerItems } - return e.EncodeElement(aux, start) + return enc.EncodeElement(aux, start) } // MarshalXML implements the xml.Marshaller interface for type PageList. -func (p PageList) MarshalXML(e *xml.Encoder, start xml.StartElement) error { +func (p PageList) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { type alias PageList aux := &struct { *alias @@ -370,11 +351,11 @@ func (p PageList) MarshalXML(e *xml.Encoder, start xml.StartElement) error { if p.PageRange != nil { aux.PageRange = &p.PageRange } - return e.EncodeElement(aux, start) + return enc.EncodeElement(aux, start) } // MarshalXML implements the xml.Marshaller interface for type QueryRequest. -func (q QueryRequest) MarshalXML(e *xml.Encoder, start xml.StartElement) error { +func (q QueryRequest) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { start.Name.Local = "QueryRequest" type alias QueryRequest aux := &struct { @@ -382,12 +363,12 @@ func (q QueryRequest) MarshalXML(e *xml.Encoder, start xml.StartElement) error { }{ alias: (*alias)(&q), } - return e.EncodeElement(aux, start) + return enc.EncodeElement(aux, start) } // MarshalJSON implements the json.Marshaller interface for type StorageError. func (s StorageError) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) + objectMap := make(map[string]any) populate(objectMap, "Message", s.Message) return json.Marshal(objectMap) } @@ -413,46 +394,46 @@ func (s *StorageError) UnmarshalJSON(data []byte) error { } // MarshalXML implements the xml.Marshaller interface for type StorageServiceProperties. -func (s StorageServiceProperties) MarshalXML(e *xml.Encoder, start xml.StartElement) error { +func (s StorageServiceProperties) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { type alias StorageServiceProperties aux := &struct { *alias - Cors *[]*CorsRule `xml:"Cors>CorsRule"` + CORS *[]*CORSRule `xml:"Cors>CorsRule"` }{ alias: (*alias)(&s), } - if s.Cors != nil { - aux.Cors = &s.Cors + if s.CORS != nil { + aux.CORS = &s.CORS } - return e.EncodeElement(aux, start) + return enc.EncodeElement(aux, start) } // MarshalXML implements the xml.Marshaller interface for type UserDelegationKey. 
-func (u UserDelegationKey) MarshalXML(e *xml.Encoder, start xml.StartElement) error { +func (u UserDelegationKey) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { type alias UserDelegationKey aux := &struct { *alias - SignedExpiry *timeRFC3339 `xml:"SignedExpiry"` - SignedStart *timeRFC3339 `xml:"SignedStart"` + SignedExpiry *dateTimeRFC3339 `xml:"SignedExpiry"` + SignedStart *dateTimeRFC3339 `xml:"SignedStart"` }{ alias: (*alias)(&u), - SignedExpiry: (*timeRFC3339)(u.SignedExpiry), - SignedStart: (*timeRFC3339)(u.SignedStart), + SignedExpiry: (*dateTimeRFC3339)(u.SignedExpiry), + SignedStart: (*dateTimeRFC3339)(u.SignedStart), } - return e.EncodeElement(aux, start) + return enc.EncodeElement(aux, start) } // UnmarshalXML implements the xml.Unmarshaller interface for type UserDelegationKey. -func (u *UserDelegationKey) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { +func (u *UserDelegationKey) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { type alias UserDelegationKey aux := &struct { *alias - SignedExpiry *timeRFC3339 `xml:"SignedExpiry"` - SignedStart *timeRFC3339 `xml:"SignedStart"` + SignedExpiry *dateTimeRFC3339 `xml:"SignedExpiry"` + SignedStart *dateTimeRFC3339 `xml:"SignedStart"` }{ alias: (*alias)(u), } - if err := d.DecodeElement(aux, &start); err != nil { + if err := dec.DecodeElement(aux, &start); err != nil { return err } u.SignedExpiry = (*time.Time)(aux.SignedExpiry) @@ -460,7 +441,7 @@ func (u *UserDelegationKey) UnmarshalXML(d *xml.Decoder, start xml.StartElement) return nil } -func populate(m map[string]interface{}, k string, v interface{}) { +func populate(m map[string]any, k string, v any) { if v == nil { return } else if azcore.IsNullValue(v) { @@ -470,7 +451,17 @@ func populate(m map[string]interface{}, k string, v interface{}) { } } -func unpopulate(data json.RawMessage, fn string, v interface{}) error { +func populateAny(m map[string]any, k string, v any) { + if v == nil { + return + } else if azcore.IsNullValue(v) { + m[k] = nil + } else { + m[k] = v + } +} + +func unpopulate(data json.RawMessage, fn string, v any) error { if data == nil { return nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_options.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_options.go new file mode 100644 index 0000000000000..216f8b73ae989 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_options.go @@ -0,0 +1,1469 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package generated + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "time" +) + +// AppendBlobClientAppendBlockFromURLOptions contains the optional parameters for the AppendBlobClient.AppendBlockFromURL +// method. +type AppendBlobClientAppendBlockFromURLOptions struct { + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. 
+ RequestID *string + + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + + // Specify the crc64 calculated for the range of bytes that must be read from the copy source. + SourceContentcrc64 []byte + + // Bytes of source data in the specified range. + SourceRange *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +// AppendBlobClientAppendBlockOptions contains the optional parameters for the AppendBlobClient.AppendBlock method. +type AppendBlobClientAppendBlockOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +// AppendBlobClientCreateOptions contains the optional parameters for the AppendBlobClient.Create method. +type AppendBlobClientCreateOptions struct { + // Optional. Used to set blob tags in various blob operations. + BlobTagsString *string + + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + + // Specified if a legal hold should be set on the blob. + LegalHold *bool + + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// AppendBlobClientSealOptions contains the optional parameters for the AppendBlobClient.Seal method. +type AppendBlobClientSealOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. 
+ RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// AppendPositionAccessConditions contains a group of parameters for the AppendBlobClient.AppendBlock method. +type AppendPositionAccessConditions struct { + // Optional conditional header, used only for the Append Block operation. A number indicating the byte offset to compare. + // Append Block will succeed only if the append position is equal to this number. If + // it is not, the request will fail with the AppendPositionConditionNotMet error (HTTP status code 412 - Precondition Failed). + AppendPosition *int64 + + // Optional conditional header. The max length in bytes permitted for the append blob. If the Append Block operation would + // cause the blob to exceed that limit or if the blob size is already greater than + // the value specified in this header, the request will fail with MaxBlobSizeConditionNotMet error (HTTP status code 412 - + // Precondition Failed). + MaxSize *int64 +} + +// BlobClientAbortCopyFromURLOptions contains the optional parameters for the BlobClient.AbortCopyFromURL method. +type BlobClientAbortCopyFromURLOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientAcquireLeaseOptions contains the optional parameters for the BlobClient.AcquireLease method. +type BlobClientAcquireLeaseOptions struct { + // Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is + // not in the correct format. See Guid Constructor (String) for a list of valid GUID + // string formats. + ProposedLeaseID *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientBreakLeaseOptions contains the optional parameters for the BlobClient.BreakLease method. +type BlobClientBreakLeaseOptions struct { + // For a break operation, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. This + // break period is only used if it is shorter than the time remaining on the + // lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has + // expired, but the lease may be held for longer than the break period. If this + // header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, + // and an infinite lease breaks immediately. 
+ BreakPeriod *int32 + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientChangeLeaseOptions contains the optional parameters for the BlobClient.ChangeLease method. +type BlobClientChangeLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientCopyFromURLOptions contains the optional parameters for the BlobClient.CopyFromURL method. +type BlobClientCopyFromURLOptions struct { + // Optional. Used to set blob tags in various blob operations. + BlobTagsString *string + + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + + // Optional, default 'replace'. Indicates if source tags should be copied or replaced with the tags specified by x-ms-tags. + CopySourceTags *BlobCopySourceTags + + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + + // Specified if a legal hold should be set on the blob. + LegalHold *bool + + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + + // Optional. Indicates the tier to be set on the blob. + Tier *AccessTier + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientCreateSnapshotOptions contains the optional parameters for the BlobClient.CreateSnapshot method. +type BlobClientCreateSnapshotOptions struct { + // Optional. Specifies a user-defined name-value pair associated with the blob. 
If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientDeleteImmutabilityPolicyOptions contains the optional parameters for the BlobClient.DeleteImmutabilityPolicy +// method. +type BlobClientDeleteImmutabilityPolicyOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientDeleteOptions contains the optional parameters for the BlobClient.Delete method. +type BlobClientDeleteOptions struct { + // Required if the blob has associated snapshots. Specify one of the following two options: include: Delete the base blob + // and all of its snapshots. only: Delete only the blob's snapshots and not the blob + // itself + DeleteSnapshots *DeleteSnapshotsOptionType + + // Optional. Only possible value is 'permanent', which specifies to permanently delete a blob if blob soft delete is enabled. + DeleteType *DeleteType + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// BlobClientDownloadOptions contains the optional parameters for the BlobClient.Download method. +type BlobClientDownloadOptions struct { + // Return only the bytes of the blob in the specified range. 
+ Range *string + + // When set to true and specified together with the Range, the service returns the CRC64 hash for the range, as long as the + // range is less than or equal to 4 MB in size. + RangeGetContentCRC64 *bool + + // When set to true and specified together with the Range, the service returns the MD5 hash for the range, as long as the + // range is less than or equal to 4 MB in size. + RangeGetContentMD5 *bool + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// BlobClientGetAccountInfoOptions contains the optional parameters for the BlobClient.GetAccountInfo method. +type BlobClientGetAccountInfoOptions struct { + // placeholder for future optional parameters +} + +// BlobClientGetPropertiesOptions contains the optional parameters for the BlobClient.GetProperties method. +type BlobClientGetPropertiesOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// BlobClientGetTagsOptions contains the optional parameters for the BlobClient.GetTags method. +type BlobClientGetTagsOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + + // The timeout parameter is expressed in seconds. 
For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// BlobClientQueryOptions contains the optional parameters for the BlobClient.Query method. +type BlobClientQueryOptions struct { + // the query request + QueryRequest *QueryRequest + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientReleaseLeaseOptions contains the optional parameters for the BlobClient.ReleaseLease method. +type BlobClientReleaseLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientRenewLeaseOptions contains the optional parameters for the BlobClient.RenewLease method. +type BlobClientRenewLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientSetExpiryOptions contains the optional parameters for the BlobClient.SetExpiry method. +type BlobClientSetExpiryOptions struct { + // The time to set the blob to expiry + ExpiresOn *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientSetHTTPHeadersOptions contains the optional parameters for the BlobClient.SetHTTPHeaders method. 
+type BlobClientSetHTTPHeadersOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientSetImmutabilityPolicyOptions contains the optional parameters for the BlobClient.SetImmutabilityPolicy method. +type BlobClientSetImmutabilityPolicyOptions struct { + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientSetLegalHoldOptions contains the optional parameters for the BlobClient.SetLegalHold method. +type BlobClientSetLegalHoldOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientSetMetadataOptions contains the optional parameters for the BlobClient.SetMetadata method. +type BlobClientSetMetadataOptions struct { + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientSetTagsOptions contains the optional parameters for the BlobClient.SetTags method. +type BlobClientSetTagsOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. 
+ RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte + + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// BlobClientSetTierOptions contains the optional parameters for the BlobClient.SetTier method. +type BlobClientSetTierOptions struct { + // Optional: Indicates the priority with which to rehydrate an archived blob. + RehydratePriority *RehydratePriority + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// BlobClientStartCopyFromURLOptions contains the optional parameters for the BlobClient.StartCopyFromURL method. +type BlobClientStartCopyFromURLOptions struct { + // Optional. Used to set blob tags in various blob operations. + BlobTagsString *string + + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + + // Specified if a legal hold should be set on the blob. + LegalHold *bool + + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + + // Optional: Indicates the priority with which to rehydrate an archived blob. + RehydratePriority *RehydratePriority + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // Overrides the sealed state of the destination blob. 
Service version 2019-12-12 and newer. + SealBlob *bool + + // Optional. Indicates the tier to be set on the blob. + Tier *AccessTier + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientUndeleteOptions contains the optional parameters for the BlobClient.Undelete method. +type BlobClientUndeleteOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. +type BlobHTTPHeaders struct { + // Optional. Sets the blob's cache control. If specified, this property is stored with the blob and returned with a read request. + BlobCacheControl *string + + // Optional. Sets the blob's Content-Disposition header. + BlobContentDisposition *string + + // Optional. Sets the blob's content encoding. If specified, this property is stored with the blob and returned with a read + // request. + BlobContentEncoding *string + + // Optional. Set the blob's content language. If specified, this property is stored with the blob and returned with a read + // request. + BlobContentLanguage *string + + // Optional. An MD5 hash of the blob content. Note that this hash is not validated, as the hashes for the individual blocks + // were validated when each was uploaded. + BlobContentMD5 []byte + + // Optional. Sets the blob's content type. If specified, this property is stored with the blob and returned with a read request. + BlobContentType *string +} + +// BlockBlobClientCommitBlockListOptions contains the optional parameters for the BlockBlobClient.CommitBlockList method. +type BlockBlobClientCommitBlockListOptions struct { + // Optional. Used to set blob tags in various blob operations. + BlobTagsString *string + + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + + // Specified if a legal hold should be set on the blob. + LegalHold *bool + + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // Optional. Indicates the tier to be set on the blob. 
+ Tier *AccessTier + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +// BlockBlobClientGetBlockListOptions contains the optional parameters for the BlockBlobClient.GetBlockList method. +type BlockBlobClientGetBlockListOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlockBlobClientPutBlobFromURLOptions contains the optional parameters for the BlockBlobClient.PutBlobFromURL method. +type BlockBlobClientPutBlobFromURLOptions struct { + // Optional. Used to set blob tags in various blob operations. + BlobTagsString *string + + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + + // Optional, default is true. Indicates if properties from the source blob should be copied. + CopySourceBlobProperties *bool + + // Optional, default 'replace'. Indicates if source tags should be copied or replaced with the tags specified by x-ms-tags. + CopySourceTags *BlobCopySourceTags + + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + + // Optional. Indicates the tier to be set on the blob. + Tier *AccessTier + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // Specify the transactional md5 for the body, to be validated by the service. 
+ TransactionalContentMD5 []byte +} + +// BlockBlobClientStageBlockFromURLOptions contains the optional parameters for the BlockBlobClient.StageBlockFromURL method. +type BlockBlobClientStageBlockFromURLOptions struct { + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + + // Specify the crc64 calculated for the range of bytes that must be read from the copy source. + SourceContentcrc64 []byte + + // Bytes of source data in the specified range. + SourceRange *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlockBlobClientStageBlockOptions contains the optional parameters for the BlockBlobClient.StageBlock method. +type BlockBlobClientStageBlockOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +// BlockBlobClientUploadOptions contains the optional parameters for the BlockBlobClient.Upload method. +type BlockBlobClientUploadOptions struct { + // Optional. Used to set blob tags in various blob operations. + BlobTagsString *string + + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + + // Specified if a legal hold should be set on the blob. + LegalHold *bool + + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // Optional. Indicates the tier to be set on the blob. + Tier *AccessTier + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +// ContainerClientAcquireLeaseOptions contains the optional parameters for the ContainerClient.AcquireLease method. +type ContainerClientAcquireLeaseOptions struct { + // Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is + // not in the correct format. See Guid Constructor (String) for a list of valid GUID + // string formats. + ProposedLeaseID *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientBreakLeaseOptions contains the optional parameters for the ContainerClient.BreakLease method. +type ContainerClientBreakLeaseOptions struct { + // For a break operation, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. This + // break period is only used if it is shorter than the time remaining on the + // lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has + // expired, but the lease may be held for longer than the break period. If this + // header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, + // and an infinite lease breaks immediately. + BreakPeriod *int32 + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientChangeLeaseOptions contains the optional parameters for the ContainerClient.ChangeLease method. +type ContainerClientChangeLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientCreateOptions contains the optional parameters for the ContainerClient.Create method. +type ContainerClientCreateOptions struct { + // Specifies whether data in the container may be accessed publicly and the level of access + Access *PublicAccessType + + // Optional. Specifies a user-defined name-value pair associated with the blob. 
If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientDeleteOptions contains the optional parameters for the ContainerClient.Delete method. +type ContainerClientDeleteOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientFilterBlobsOptions contains the optional parameters for the ContainerClient.FilterBlobs method. +type ContainerClientFilterBlobsOptions struct { + // Include this parameter to specify one or more datasets to include in the response. + Include []FilterBlobsIncludeItem + + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientGetAccessPolicyOptions contains the optional parameters for the ContainerClient.GetAccessPolicy method. 
+type ContainerClientGetAccessPolicyOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientGetAccountInfoOptions contains the optional parameters for the ContainerClient.GetAccountInfo method. +type ContainerClientGetAccountInfoOptions struct { + // placeholder for future optional parameters +} + +// ContainerClientGetPropertiesOptions contains the optional parameters for the ContainerClient.GetProperties method. +type ContainerClientGetPropertiesOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientListBlobFlatSegmentOptions contains the optional parameters for the ContainerClient.NewListBlobFlatSegmentPager +// method. +type ContainerClientListBlobFlatSegmentOptions struct { + // Include this parameter to specify one or more datasets to include in the response. + Include []ListBlobsIncludeItem + + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + + // Filters the results to return only containers whose name begins with the specified prefix. + Prefix *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientListBlobHierarchySegmentOptions contains the optional parameters for the ContainerClient.NewListBlobHierarchySegmentPager +// method. 
+type ContainerClientListBlobHierarchySegmentOptions struct { + // Include this parameter to specify one or more datasets to include in the response. + Include []ListBlobsIncludeItem + + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + + // Filters the results to return only containers whose name begins with the specified prefix. + Prefix *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientReleaseLeaseOptions contains the optional parameters for the ContainerClient.ReleaseLease method. +type ContainerClientReleaseLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientRenameOptions contains the optional parameters for the ContainerClient.Rename method. +type ContainerClientRenameOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // A lease ID for the source path. If specified, the source path must have an active lease and the lease ID must match. + SourceLeaseID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientRenewLeaseOptions contains the optional parameters for the ContainerClient.RenewLease method. +type ContainerClientRenewLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. 
For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientRestoreOptions contains the optional parameters for the ContainerClient.Restore method. +type ContainerClientRestoreOptions struct { + // Optional. Version 2019-12-12 and later. Specifies the name of the deleted container to restore. + DeletedContainerName *string + + // Optional. Version 2019-12-12 and later. Specifies the version of the deleted container to restore. + DeletedContainerVersion *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientSetAccessPolicyOptions contains the optional parameters for the ContainerClient.SetAccessPolicy method. +type ContainerClientSetAccessPolicyOptions struct { + // Specifies whether data in the container may be accessed publicly and the level of access + Access *PublicAccessType + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientSetMetadataOptions contains the optional parameters for the ContainerClient.SetMetadata method. +type ContainerClientSetMetadataOptions struct { + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientSubmitBatchOptions contains the optional parameters for the ContainerClient.SubmitBatch method. +type ContainerClientSubmitBatchOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. 
For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerCPKScopeInfo contains a group of parameters for the ContainerClient.Create method. +type ContainerCPKScopeInfo struct { + // Optional. Version 2019-07-07 and later. Specifies the default encryption scope to set on the container and use for all + // future writes. + DefaultEncryptionScope *string + + // Optional. Version 2019-07-07 and newer. If true, prevents any request from specifying a different encryption scope than + // the scope set on the container. + PreventEncryptionScopeOverride *bool +} + +// CPKInfo contains a group of parameters for the BlobClient.Download method. +type CPKInfo struct { + // The algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided + // if the x-ms-encryption-key header is provided. + EncryptionAlgorithm *EncryptionAlgorithmType + + // Optional. Specifies the encryption key to use to encrypt the data provided in the request. If not specified, encryption + // is performed with the root account encryption key. For more information, see + // Encryption at Rest for Azure Storage Services. + EncryptionKey *string + + // The SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key header is provided. + EncryptionKeySHA256 *string +} + +// CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +type CPKScopeInfo struct { + // Optional. Version 2019-07-07 and later. Specifies the name of the encryption scope to use to encrypt the data provided + // in the request. If not specified, encryption is performed with the default + // account encryption scope. For more information, see Encryption at Rest for Azure Storage Services. + EncryptionScope *string +} + +// LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +type LeaseAccessConditions struct { + // If specified, the operation only succeeds if the resource's lease is active and matches this ID. + LeaseID *string +} + +// ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +type ModifiedAccessConditions struct { + // Specify an ETag value to operate only on blobs with a matching value. + IfMatch *azcore.ETag + + // Specify this header value to operate only on a blob if it has been modified since the specified date/time. + IfModifiedSince *time.Time + + // Specify an ETag value to operate only on blobs without a matching value. + IfNoneMatch *azcore.ETag + + // Specify a SQL where clause on blob tags to operate only on blobs with a matching value. + IfTags *string + + // Specify this header value to operate only on a blob if it has not been modified since the specified date/time. + IfUnmodifiedSince *time.Time +} + +// PageBlobClientClearPagesOptions contains the optional parameters for the PageBlobClient.ClearPages method. +type PageBlobClientClearPagesOptions struct { + // Return only the bytes of the blob in the specified range. + Range *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientCopyIncrementalOptions contains the optional parameters for the PageBlobClient.CopyIncremental method. +type PageBlobClientCopyIncrementalOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientCreateOptions contains the optional parameters for the PageBlobClient.Create method. +type PageBlobClientCreateOptions struct { + // Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of + // the sequence number must be between 0 and 2^63 - 1. + BlobSequenceNumber *int64 + + // Optional. Used to set blob tags in various blob operations. + BlobTagsString *string + + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + + // Specified if a legal hold should be set on the blob. + LegalHold *bool + + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // Optional. Indicates the tier to be set on the page blob. + Tier *PremiumPageBlobAccessTier + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientGetPageRangesDiffOptions contains the optional parameters for the PageBlobClient.NewGetPageRangesDiffPager +// method. +type PageBlobClientGetPageRangesDiffOptions struct { + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + + // Specifies the maximum number of containers to return. 
If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + + // Optional. This header is only supported in service versions 2019-04-19 and after and specifies the URL of a previous snapshot + // of the target blob. The response will only contain pages that were changed + // between the target blob and its previous snapshot. + PrevSnapshotURL *string + + // Optional in version 2015-07-08 and newer. The prevsnapshot parameter is a DateTime value that specifies that the response + // will contain only pages that were changed between target blob and previous + // snapshot. Changed pages include both updated and cleared pages. The target blob may be a snapshot, as long as the snapshot + // specified by prevsnapshot is the older of the two. Note that incremental + // snapshots are currently supported only for blobs created on or after January 1, 2016. + Prevsnapshot *string + + // Return only the bytes of the blob in the specified range. + Range *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientGetPageRangesOptions contains the optional parameters for the PageBlobClient.NewGetPageRangesPager method. +type PageBlobClientGetPageRangesOptions struct { + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + + // Return only the bytes of the blob in the specified range. 
+ Range *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientResizeOptions contains the optional parameters for the PageBlobClient.Resize method. +type PageBlobClientResizeOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientUpdateSequenceNumberOptions contains the optional parameters for the PageBlobClient.UpdateSequenceNumber +// method. +type PageBlobClientUpdateSequenceNumberOptions struct { + // Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of + // the sequence number must be between 0 and 2^63 - 1. + BlobSequenceNumber *int64 + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientUploadPagesFromURLOptions contains the optional parameters for the PageBlobClient.UploadPagesFromURL method. +type PageBlobClientUploadPagesFromURLOptions struct { + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + + // Specify the crc64 calculated for the range of bytes that must be read from the copy source. + SourceContentcrc64 []byte + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientUploadPagesOptions contains the optional parameters for the PageBlobClient.UploadPages method. +type PageBlobClientUploadPagesOptions struct { + // Return only the bytes of the blob in the specified range. 
+ Range *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +// SequenceNumberAccessConditions contains a group of parameters for the PageBlobClient.UploadPages method. +type SequenceNumberAccessConditions struct { + // Specify this header value to operate only on a blob if it has the specified sequence number. + IfSequenceNumberEqualTo *int64 + + // Specify this header value to operate only on a blob if it has a sequence number less than the specified. + IfSequenceNumberLessThan *int64 + + // Specify this header value to operate only on a blob if it has a sequence number less than or equal to the specified. + IfSequenceNumberLessThanOrEqualTo *int64 +} + +// ServiceClientFilterBlobsOptions contains the optional parameters for the ServiceClient.FilterBlobs method. +type ServiceClientFilterBlobsOptions struct { + // Include this parameter to specify one or more datasets to include in the response. + Include []FilterBlobsIncludeItem + + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ServiceClientGetAccountInfoOptions contains the optional parameters for the ServiceClient.GetAccountInfo method. +type ServiceClientGetAccountInfoOptions struct { + // placeholder for future optional parameters +} + +// ServiceClientGetPropertiesOptions contains the optional parameters for the ServiceClient.GetProperties method. 
+type ServiceClientGetPropertiesOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ServiceClientGetStatisticsOptions contains the optional parameters for the ServiceClient.GetStatistics method. +type ServiceClientGetStatisticsOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ServiceClientGetUserDelegationKeyOptions contains the optional parameters for the ServiceClient.GetUserDelegationKey method. +type ServiceClientGetUserDelegationKeyOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ServiceClientListContainersSegmentOptions contains the optional parameters for the ServiceClient.NewListContainersSegmentPager +// method. +type ServiceClientListContainersSegmentOptions struct { + // Include this parameter to specify that the container's metadata be returned as part of the response body. + Include []ListContainersIncludeType + + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + + // Filters the results to return only containers whose name begins with the specified prefix. + Prefix *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ServiceClientSetPropertiesOptions contains the optional parameters for the ServiceClient.SetProperties method. +type ServiceClientSetPropertiesOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ServiceClientSubmitBatchOptions contains the optional parameters for the ServiceClient.SubmitBatch method. +type ServiceClientSubmitBatchOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL method. +type SourceModifiedAccessConditions struct { + // Specify an ETag value to operate only on blobs with a matching value. + SourceIfMatch *azcore.ETag + + // Specify this header value to operate only on a blob if it has been modified since the specified date/time. + SourceIfModifiedSince *time.Time + + // Specify an ETag value to operate only on blobs without a matching value. + SourceIfNoneMatch *azcore.ETag + + // Specify a SQL where clause on blob tags to operate only on blobs with a matching value. + SourceIfTags *string + + // Specify this header value to operate only on a blob if it has not been modified since the specified date/time. + SourceIfUnmodifiedSince *time.Time +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go index 2747e40812084..cb6a19f7a335b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go @@ -3,9 +3,8 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. // Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. package generated @@ -22,51 +21,44 @@ import ( ) // PageBlobClient contains the methods for the PageBlob group. -// Don't use this type directly, use NewPageBlobClient() instead. +// Don't use this type directly, use a constructor function instead. type PageBlobClient struct { + internal *azcore.Client endpoint string - pl runtime.Pipeline -} - -// NewPageBlobClient creates a new instance of PageBlobClient with the specified values. 
-// endpoint - The URL of the service account, container, or blob that is the target of the desired operation. -// pl - the pipeline used for sending requests and handling responses. -func NewPageBlobClient(endpoint string, pl runtime.Pipeline) *PageBlobClient { - client := &PageBlobClient{ - endpoint: endpoint, - pl: pl, - } - return client } // ClearPages - The Clear Pages operation clears a set of pages from a page blob // If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// contentLength - The length of the request. -// options - PageBlobClientClearPagesOptions contains the optional parameters for the PageBlobClient.ClearPages method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. -// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. -// SequenceNumberAccessConditions - SequenceNumberAccessConditions contains a group of parameters for the PageBlobClient.UploadPages -// method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -func (client *PageBlobClient) ClearPages(ctx context.Context, contentLength int64, options *PageBlobClientClearPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientClearPagesResponse, error) { +// +// Generated from API version 2023-11-03 +// - contentLength - The length of the request. +// - options - PageBlobClientClearPagesOptions contains the optional parameters for the PageBlobClient.ClearPages method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// - SequenceNumberAccessConditions - SequenceNumberAccessConditions contains a group of parameters for the PageBlobClient.UploadPages +// method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
+func (client *PageBlobClient) ClearPages(ctx context.Context, contentLength int64, options *PageBlobClientClearPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientClearPagesResponse, error) { + var err error req, err := client.clearPagesCreateRequest(ctx, contentLength, options, leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions) if err != nil { return PageBlobClientClearPagesResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return PageBlobClientClearPagesResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return PageBlobClientClearPagesResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return PageBlobClientClearPagesResponse{}, err } - return client.clearPagesHandleResponse(resp) + resp, err := client.clearPagesHandleResponse(httpResp) + return resp, err } // clearPagesCreateRequest creates the ClearPages request. -func (client *PageBlobClient) clearPagesCreateRequest(ctx context.Context, contentLength int64, options *PageBlobClientClearPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *PageBlobClient) clearPagesCreateRequest(ctx context.Context, contentLength int64, options *PageBlobClientClearPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -107,10 +99,10 @@ func (client *PageBlobClient) clearPagesCreateRequest(ctx context.Context, conte req.Raw().Header["x-ms-if-sequence-number-eq"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -121,7 +113,7 @@ func (client *PageBlobClient) clearPagesCreateRequest(ctx context.Context, conte if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = 
[]string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -132,15 +124,22 @@ func (client *PageBlobClient) clearPagesCreateRequest(ctx context.Context, conte // clearPagesHandleResponse handles the ClearPages response. func (client *PageBlobClient) clearPagesHandleResponse(resp *http.Response) (PageBlobClientClearPagesResponse, error) { result := PageBlobClientClearPagesResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return PageBlobClientClearPagesResponse{}, err + } + result.BlobSequenceNumber = &blobSequenceNumber } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) if err != nil { return PageBlobClientClearPagesResponse{}, err } - result.LastModified = &lastModified + result.ContentCRC64 = contentCRC64 } if val := resp.Header.Get("Content-MD5"); val != "" { contentMD5, err := base64.StdEncoding.DecodeString(val) @@ -149,22 +148,22 @@ func (client *PageBlobClient) clearPagesHandleResponse(resp *http.Response) (Pag } result.ContentMD5 = contentMD5 } - if val := resp.Header.Get("x-ms-content-crc64"); val != "" { - xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) if err != nil { return PageBlobClientClearPagesResponse{}, err } - result.XMSContentCRC64 = xMSContentCRC64 + result.Date = &date } - if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { - blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) if err != nil { return PageBlobClientClearPagesResponse{}, err } - result.BlobSequenceNumber = &blobSequenceNumber - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val + result.LastModified = &lastModified } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val @@ -172,13 +171,6 @@ func (client *PageBlobClient) clearPagesHandleResponse(resp *http.Response) (Pag if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return PageBlobClientClearPagesResponse{}, err - } - result.Date = &date - } return result, nil } @@ -188,26 +180,30 @@ func (client *PageBlobClient) clearPagesHandleResponse(resp *http.Response) (Pag // be read or copied from as usual. This API is supported since REST version // 2016-05-31. // If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies -// a page blob snapshot. The value should be URL-encoded as it would appear in a request -// URI. 
The source blob must either be public or must be authenticated via a shared access signature. -// options - PageBlobClientCopyIncrementalOptions contains the optional parameters for the PageBlobClient.CopyIncremental -// method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// +// Generated from API version 2023-11-03 +// - copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies +// a page blob snapshot. The value should be URL-encoded as it would appear in a request +// URI. The source blob must either be public or must be authenticated via a shared access signature. +// - options - PageBlobClientCopyIncrementalOptions contains the optional parameters for the PageBlobClient.CopyIncremental +// method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *PageBlobClient) CopyIncremental(ctx context.Context, copySource string, options *PageBlobClientCopyIncrementalOptions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientCopyIncrementalResponse, error) { + var err error req, err := client.copyIncrementalCreateRequest(ctx, copySource, options, modifiedAccessConditions) if err != nil { return PageBlobClientCopyIncrementalResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return PageBlobClientCopyIncrementalResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return PageBlobClientCopyIncrementalResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return PageBlobClientCopyIncrementalResponse{}, err } - return client.copyIncrementalHandleResponse(resp) + resp, err := client.copyIncrementalHandleResponse(httpResp) + return resp, err } // copyIncrementalCreateRequest creates the CopyIncremental request. 
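// Editor's sketch (not part of the vendored file or of this patch): a minimal,
// hypothetical example of driving the generated CopyIncremental method shown
// above. It assumes a *PageBlobClient already constructed by the SDK's exported
// wrappers, that this code sits where the generated package's identifiers are
// visible, and that "context" and "time" are imported; the snapshot URL and the
// cutoff time are invented purely for illustration.
func copyIncrementalSketch(ctx context.Context, client *PageBlobClient) error {
	// Conditional header: operate only if modified since this time
	// (see the ModifiedAccessConditions struct earlier in this patch).
	since := time.Date(2023, time.November, 1, 0, 0, 0, 0, time.UTC)
	conds := &ModifiedAccessConditions{IfModifiedSince: &since}

	// Per the doc comment above, copySource must be a URL-encoded page blob
	// snapshot URL, either public or carrying a shared access signature.
	src := "https://account.blob.core.windows.net/container/blob?snapshot=2023-11-01T00%3A00%3A00Z"

	resp, err := client.CopyIncremental(ctx, src, &PageBlobClientCopyIncrementalOptions{}, conds)
	if err != nil {
		return err
	}
	// CopyID and CopyStatus are surfaced by copyIncrementalHandleResponse above;
	// callers would typically poll the copy status until the copy completes.
	_, _ = resp.CopyID, resp.CopyStatus
	return nil
}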
@@ -223,10 +219,10 @@ func (client *PageBlobClient) copyIncrementalCreateRequest(ctx context.Context, } req.Raw().URL.RawQuery = reqQP.Encode() if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -238,7 +234,7 @@ func (client *PageBlobClient) copyIncrementalCreateRequest(ctx context.Context, req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } req.Raw().Header["x-ms-copy-source"] = []string{copySource} - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -249,6 +245,22 @@ func (client *PageBlobClient) copyIncrementalCreateRequest(ctx context.Context, // copyIncrementalHandleResponse handles the CopyIncremental response. func (client *PageBlobClient) copyIncrementalHandleResponse(resp *http.Response) (PageBlobClientCopyIncrementalResponse, error) { result := PageBlobClientCopyIncrementalResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-copy-id"); val != "" { + result.CopyID = &val + } + if val := resp.Header.Get("x-ms-copy-status"); val != "" { + result.CopyStatus = (*CopyStatusType)(&val) + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientCopyIncrementalResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -259,60 +271,48 @@ func (client *PageBlobClient) copyIncrementalHandleResponse(resp *http.Response) } result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return PageBlobClientCopyIncrementalResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("x-ms-copy-id"); val != "" { - result.CopyID = &val - } - if val := resp.Header.Get("x-ms-copy-status"); val != "" { - result.CopyStatus = (*CopyStatusType)(&val) - } return result, nil } // Create - The Create operation creates a new page blob. // If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// contentLength - The length of the request. -// blobContentLength - This header specifies the maximum size for the page blob, up to 1 TB. 
The page blob size must be aligned -// to a 512-byte boundary. -// options - PageBlobClientCreateOptions contains the optional parameters for the PageBlobClient.Create method. -// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. -// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -func (client *PageBlobClient) Create(ctx context.Context, contentLength int64, blobContentLength int64, options *PageBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientCreateResponse, error) { +// +// Generated from API version 2023-11-03 +// - contentLength - The length of the request. +// - blobContentLength - This header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned +// to a 512-byte boundary. +// - options - PageBlobClientCreateOptions contains the optional parameters for the PageBlobClient.Create method. +// - BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *PageBlobClient) Create(ctx context.Context, contentLength int64, blobContentLength int64, options *PageBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientCreateResponse, error) { + var err error req, err := client.createCreateRequest(ctx, contentLength, blobContentLength, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) if err != nil { return PageBlobClientCreateResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return PageBlobClientCreateResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return PageBlobClientCreateResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return PageBlobClientCreateResponse{}, err } - return client.createHandleResponse(resp) + resp, err := client.createHandleResponse(httpResp) + return resp, err } // createCreateRequest creates the Create request. 
-func (client *PageBlobClient) createCreateRequest(ctx context.Context, contentLength int64, blobContentLength int64, options *PageBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *PageBlobClient) createCreateRequest(ctx context.Context, contentLength int64, blobContentLength int64, options *PageBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -344,7 +344,9 @@ func (client *PageBlobClient) createCreateRequest(ctx context.Context, contentLe } if options != nil && options.Metadata != nil { for k, v := range options.Metadata { - req.Raw().Header["x-ms-meta-"+k] = []string{v} + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } } } if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { @@ -366,10 +368,10 @@ func (client *PageBlobClient) createCreateRequest(ctx context.Context, contentLe req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -384,7 +386,7 @@ func (client *PageBlobClient) createCreateRequest(ctx context.Context, contentLe if options != nil && options.BlobSequenceNumber != nil { req.Raw().Header["x-ms-blob-sequence-number"] = []string{strconv.FormatInt(*options.BlobSequenceNumber, 10)} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -392,7 +394,7 @@ func (client *PageBlobClient) createCreateRequest(ctx context.Context, contentLe req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} } if options != nil && options.ImmutabilityPolicyExpiry != nil { - req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{options.ImmutabilityPolicyExpiry.Format(time.RFC1123)} + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} } if options != nil && options.ImmutabilityPolicyMode != nil { req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} @@ -407,15 +409,8 @@ func (client *PageBlobClient) createCreateRequest(ctx context.Context, contentLe // createHandleResponse handles the Create response. 
func (client *PageBlobClient) createHandleResponse(resp *http.Response) (PageBlobClientCreateResponse, error) { result := PageBlobClientCreateResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return PageBlobClientCreateResponse{}, err - } - result.LastModified = &lastModified + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val } if val := resp.Header.Get("Content-MD5"); val != "" { contentMD5, err := base64.StdEncoding.DecodeString(val) @@ -424,18 +419,6 @@ func (client *PageBlobClient) createHandleResponse(resp *http.Response) (PageBlo } result.ContentMD5 = contentMD5 } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("x-ms-version-id"); val != "" { - result.VersionID = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -443,6 +426,15 @@ func (client *PageBlobClient) createHandleResponse(resp *http.Response) (PageBlo } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { isServerEncrypted, err := strconv.ParseBool(val) if err != nil { @@ -450,45 +442,49 @@ func (client *PageBlobClient) createHandleResponse(resp *http.Response) (PageBlo } result.IsServerEncrypted = &isServerEncrypted } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientCreateResponse{}, err + } + result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val } return result, nil } // NewGetPageRangesPager - The Get Page Ranges operation returns the list of valid page ranges for a page blob or snapshot // of a page blob -// If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// options - PageBlobClientGetPageRangesOptions contains the optional parameters for the PageBlobClient.GetPageRanges method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// +// Generated from API version 2023-11-03 +// - options - PageBlobClientGetPageRangesOptions contains the optional parameters for the PageBlobClient.NewGetPageRangesPager +// method. 
+// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *PageBlobClient) NewGetPageRangesPager(options *PageBlobClientGetPageRangesOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) *runtime.Pager[PageBlobClientGetPageRangesResponse] { return runtime.NewPager(runtime.PagingHandler[PageBlobClientGetPageRangesResponse]{ More: func(page PageBlobClientGetPageRangesResponse) bool { return page.NextMarker != nil && len(*page.NextMarker) > 0 }, Fetcher: func(ctx context.Context, page *PageBlobClientGetPageRangesResponse) (PageBlobClientGetPageRangesResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.GetPageRangesCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextMarker) - } - if err != nil { - return PageBlobClientGetPageRangesResponse{}, err + nextLink := "" + if page != nil { + nextLink = *page.NextMarker } - resp, err := client.pl.Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.GetPageRangesCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) + }, nil) if err != nil { return PageBlobClientGetPageRangesResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return PageBlobClientGetPageRangesResponse{}, runtime.NewResponseError(resp) - } return client.GetPageRangesHandleResponse(resp) }, }) @@ -522,10 +518,10 @@ func (client *PageBlobClient) GetPageRangesCreateRequest(ctx context.Context, op req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -536,7 +532,7 @@ func (client *PageBlobClient) GetPageRangesCreateRequest(ctx context.Context, op if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -547,16 +543,6 @@ func (client *PageBlobClient) GetPageRangesCreateRequest(ctx context.Context, op // GetPageRangesHandleResponse handles the GetPageRanges response. 
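// Editorial note (illustrative only, not part of the generated diff): NewGetPageRangesPager now
// drives paging through runtime.FetcherForNextLink instead of building follow-up requests by hand.
// Callers keep using the azcore pager surface unchanged; a minimal sketch of the assumed usage,
// with error handling elided and assuming the generated PageList model exposes a PageRange slice:
//
//	pager := client.NewGetPageRangesPager(nil, nil, nil)
//	for pager.More() {
//		page, err := pager.NextPage(ctx)
//		if err != nil {
//			return err
//		}
//		for _, pr := range page.PageList.PageRange {
//			_ = pr // process each returned page range
//		}
//	}
//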
func (client *PageBlobClient) GetPageRangesHandleResponse(resp *http.Response) (PageBlobClientGetPageRangesResponse, error) { result := PageBlobClientGetPageRangesResponse{} - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return PageBlobClientGetPageRangesResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } if val := resp.Header.Get("x-ms-blob-content-length"); val != "" { blobContentLength, err := strconv.ParseInt(val, 10, 64) if err != nil { @@ -567,12 +553,6 @@ func (client *PageBlobClient) GetPageRangesHandleResponse(resp *http.Response) ( if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -580,6 +560,22 @@ func (client *PageBlobClient) GetPageRangesHandleResponse(resp *http.Response) ( } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientGetPageRangesResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } if err := runtime.UnmarshalAsXML(resp, &result.PageList); err != nil { return PageBlobClientGetPageRangesResponse{}, err } @@ -588,35 +584,28 @@ func (client *PageBlobClient) GetPageRangesHandleResponse(resp *http.Response) ( // NewGetPageRangesDiffPager - The Get Page Ranges Diff operation returns the list of valid page ranges for a page blob that // were changed between target blob and previous snapshot. -// If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// options - PageBlobClientGetPageRangesDiffOptions contains the optional parameters for the PageBlobClient.GetPageRangesDiff -// method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// +// Generated from API version 2023-11-03 +// - options - PageBlobClientGetPageRangesDiffOptions contains the optional parameters for the PageBlobClient.NewGetPageRangesDiffPager +// method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
func (client *PageBlobClient) NewGetPageRangesDiffPager(options *PageBlobClientGetPageRangesDiffOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) *runtime.Pager[PageBlobClientGetPageRangesDiffResponse] { return runtime.NewPager(runtime.PagingHandler[PageBlobClientGetPageRangesDiffResponse]{ More: func(page PageBlobClientGetPageRangesDiffResponse) bool { return page.NextMarker != nil && len(*page.NextMarker) > 0 }, Fetcher: func(ctx context.Context, page *PageBlobClientGetPageRangesDiffResponse) (PageBlobClientGetPageRangesDiffResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.GetPageRangesDiffCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextMarker) - } - if err != nil { - return PageBlobClientGetPageRangesDiffResponse{}, err + nextLink := "" + if page != nil { + nextLink = *page.NextMarker } - resp, err := client.pl.Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.GetPageRangesDiffCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) + }, nil) if err != nil { return PageBlobClientGetPageRangesDiffResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return PageBlobClientGetPageRangesDiffResponse{}, runtime.NewResponseError(resp) - } return client.GetPageRangesDiffHandleResponse(resp) }, }) @@ -656,10 +645,10 @@ func (client *PageBlobClient) GetPageRangesDiffCreateRequest(ctx context.Context req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -670,7 +659,7 @@ func (client *PageBlobClient) GetPageRangesDiffCreateRequest(ctx context.Context if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -681,16 +670,6 @@ func (client *PageBlobClient) GetPageRangesDiffCreateRequest(ctx context.Context // GetPageRangesDiffHandleResponse handles the GetPageRangesDiff response. 
func (client *PageBlobClient) GetPageRangesDiffHandleResponse(resp *http.Response) (PageBlobClientGetPageRangesDiffResponse, error) { result := PageBlobClientGetPageRangesDiffResponse{} - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return PageBlobClientGetPageRangesDiffResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } if val := resp.Header.Get("x-ms-blob-content-length"); val != "" { blobContentLength, err := strconv.ParseInt(val, 10, 64) if err != nil { @@ -701,12 +680,6 @@ func (client *PageBlobClient) GetPageRangesDiffHandleResponse(resp *http.Respons if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -714,6 +687,22 @@ func (client *PageBlobClient) GetPageRangesDiffHandleResponse(resp *http.Respons } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientGetPageRangesDiffResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } if err := runtime.UnmarshalAsXML(resp, &result.PageList); err != nil { return PageBlobClientGetPageRangesDiffResponse{}, err } @@ -722,31 +711,35 @@ func (client *PageBlobClient) GetPageRangesDiffHandleResponse(resp *http.Respons // Resize - Resize the Blob // If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// blobContentLength - This header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned -// to a 512-byte boundary. -// options - PageBlobClientResizeOptions contains the optional parameters for the PageBlobClient.Resize method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. -// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -func (client *PageBlobClient) Resize(ctx context.Context, blobContentLength int64, options *PageBlobClientResizeOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientResizeResponse, error) { +// +// Generated from API version 2023-11-03 +// - blobContentLength - This header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned +// to a 512-byte boundary. +// - options - PageBlobClientResizeOptions contains the optional parameters for the PageBlobClient.Resize method. 
+// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *PageBlobClient) Resize(ctx context.Context, blobContentLength int64, options *PageBlobClientResizeOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientResizeResponse, error) { + var err error req, err := client.resizeCreateRequest(ctx, blobContentLength, options, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) if err != nil { return PageBlobClientResizeResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return PageBlobClientResizeResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return PageBlobClientResizeResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return PageBlobClientResizeResponse{}, err } - return client.resizeHandleResponse(resp) + resp, err := client.resizeHandleResponse(httpResp) + return resp, err } // resizeCreateRequest creates the Resize request. -func (client *PageBlobClient) resizeCreateRequest(ctx context.Context, blobContentLength int64, options *PageBlobClientResizeOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *PageBlobClient) resizeCreateRequest(ctx context.Context, blobContentLength int64, options *PageBlobClientResizeOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -773,10 +766,10 @@ func (client *PageBlobClient) resizeCreateRequest(ctx context.Context, blobConte req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -788,7 +781,7 @@ func (client *PageBlobClient) resizeCreateRequest(ctx context.Context, blobConte req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } req.Raw().Header["x-ms-blob-content-length"] = 
[]string{strconv.FormatInt(blobContentLength, 10)} - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -799,16 +792,6 @@ func (client *PageBlobClient) resizeCreateRequest(ctx context.Context, blobConte // resizeHandleResponse handles the Resize response. func (client *PageBlobClient) resizeHandleResponse(resp *http.Response) (PageBlobClientResizeResponse, error) { result := PageBlobClientResizeResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return PageBlobClientResizeResponse{}, err - } - result.LastModified = &lastModified - } if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) if err != nil { @@ -819,12 +802,6 @@ func (client *PageBlobClient) resizeHandleResponse(resp *http.Response) (PageBlo if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -832,31 +809,51 @@ func (client *PageBlobClient) resizeHandleResponse(resp *http.Response) (PageBlo } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientResizeResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } // UpdateSequenceNumber - Update the sequence number of the blob // If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// sequenceNumberAction - Required if the x-ms-blob-sequence-number header is set for the request. This property applies to -// page blobs only. This property indicates how the service should modify the blob's sequence number -// options - PageBlobClientUpdateSequenceNumberOptions contains the optional parameters for the PageBlobClient.UpdateSequenceNumber -// method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// +// Generated from API version 2023-11-03 +// - sequenceNumberAction - Required if the x-ms-blob-sequence-number header is set for the request. This property applies to +// page blobs only. This property indicates how the service should modify the blob's sequence number +// - options - PageBlobClientUpdateSequenceNumberOptions contains the optional parameters for the PageBlobClient.UpdateSequenceNumber +// method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. 
+// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *PageBlobClient) UpdateSequenceNumber(ctx context.Context, sequenceNumberAction SequenceNumberActionType, options *PageBlobClientUpdateSequenceNumberOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientUpdateSequenceNumberResponse, error) { + var err error req, err := client.updateSequenceNumberCreateRequest(ctx, sequenceNumberAction, options, leaseAccessConditions, modifiedAccessConditions) if err != nil { return PageBlobClientUpdateSequenceNumberResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return PageBlobClientUpdateSequenceNumberResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return PageBlobClientUpdateSequenceNumberResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return PageBlobClientUpdateSequenceNumberResponse{}, err } - return client.updateSequenceNumberHandleResponse(resp) + resp, err := client.updateSequenceNumberHandleResponse(httpResp) + return resp, err } // updateSequenceNumberCreateRequest creates the UpdateSequenceNumber request. @@ -875,10 +872,10 @@ func (client *PageBlobClient) updateSequenceNumberCreateRequest(ctx context.Cont req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -893,7 +890,7 @@ func (client *PageBlobClient) updateSequenceNumberCreateRequest(ctx context.Cont if options != nil && options.BlobSequenceNumber != nil { req.Raw().Header["x-ms-blob-sequence-number"] = []string{strconv.FormatInt(*options.BlobSequenceNumber, 10)} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -904,16 +901,6 @@ func (client *PageBlobClient) updateSequenceNumberCreateRequest(ctx context.Cont // updateSequenceNumberHandleResponse handles the UpdateSequenceNumber response. 
func (client *PageBlobClient) updateSequenceNumberHandleResponse(resp *http.Response) (PageBlobClientUpdateSequenceNumberResponse, error) { result := PageBlobClientUpdateSequenceNumberResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return PageBlobClientUpdateSequenceNumberResponse{}, err - } - result.LastModified = &lastModified - } if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) if err != nil { @@ -924,12 +911,6 @@ func (client *PageBlobClient) updateSequenceNumberHandleResponse(resp *http.Resp if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -937,38 +918,58 @@ func (client *PageBlobClient) updateSequenceNumberHandleResponse(resp *http.Resp } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientUpdateSequenceNumberResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } // UploadPages - The Upload Pages operation writes a range of pages to a page blob // If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// contentLength - The length of the request. -// body - Initial data -// options - PageBlobClientUploadPagesOptions contains the optional parameters for the PageBlobClient.UploadPages method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. -// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. -// SequenceNumberAccessConditions - SequenceNumberAccessConditions contains a group of parameters for the PageBlobClient.UploadPages -// method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -func (client *PageBlobClient) UploadPages(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *PageBlobClientUploadPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientUploadPagesResponse, error) { +// +// Generated from API version 2023-11-03 +// - contentLength - The length of the request. +// - body - Initial data +// - options - PageBlobClientUploadPagesOptions contains the optional parameters for the PageBlobClient.UploadPages method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. 
+// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// - SequenceNumberAccessConditions - SequenceNumberAccessConditions contains a group of parameters for the PageBlobClient.UploadPages +// method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *PageBlobClient) UploadPages(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *PageBlobClientUploadPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientUploadPagesResponse, error) { + var err error req, err := client.uploadPagesCreateRequest(ctx, contentLength, body, options, leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions) if err != nil { return PageBlobClientUploadPagesResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return PageBlobClientUploadPagesResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return PageBlobClientUploadPagesResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return PageBlobClientUploadPagesResponse{}, err } - return client.uploadPagesHandleResponse(resp) + resp, err := client.uploadPagesHandleResponse(httpResp) + return resp, err } // uploadPagesCreateRequest creates the UploadPages request. -func (client *PageBlobClient) uploadPagesCreateRequest(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *PageBlobClientUploadPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { +func (client *PageBlobClient) uploadPagesCreateRequest(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *PageBlobClientUploadPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -1015,10 +1016,10 @@ func (client *PageBlobClient) uploadPagesCreateRequest(ctx context.Context, cont req.Raw().Header["x-ms-if-sequence-number-eq"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = 
[]string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -1029,56 +1030,43 @@ func (client *PageBlobClient) uploadPagesCreateRequest(ctx context.Context, cont if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } req.Raw().Header["Accept"] = []string{"application/xml"} - return req, req.SetBody(body, "application/octet-stream") + if err := req.SetBody(body, "application/octet-stream"); err != nil { + return nil, err + } + return req, nil } // uploadPagesHandleResponse handles the UploadPages response. func (client *PageBlobClient) uploadPagesHandleResponse(resp *http.Response) (PageBlobClientUploadPagesResponse, error) { result := PageBlobClientUploadPagesResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) if err != nil { return PageBlobClientUploadPagesResponse{}, err } - result.LastModified = &lastModified + result.BlobSequenceNumber = &blobSequenceNumber } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return PageBlobClientUploadPagesResponse{}, err - } - result.ContentMD5 = contentMD5 + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val } if val := resp.Header.Get("x-ms-content-crc64"); val != "" { - xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) + contentCRC64, err := base64.StdEncoding.DecodeString(val) if err != nil { return PageBlobClientUploadPagesResponse{}, err } - result.XMSContentCRC64 = xMSContentCRC64 + result.ContentCRC64 = contentCRC64 } - if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { - blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) if err != nil { return PageBlobClientUploadPagesResponse{}, err } - result.BlobSequenceNumber = &blobSequenceNumber - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val + result.ContentMD5 = contentMD5 } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) @@ -1087,6 +1075,15 @@ func (client *PageBlobClient) uploadPagesHandleResponse(resp *http.Response) (Pa } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } 
if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { isServerEncrypted, err := strconv.ParseBool(val) if err != nil { @@ -1094,11 +1091,18 @@ func (client *PageBlobClient) uploadPagesHandleResponse(resp *http.Response) (Pa } result.IsServerEncrypted = &isServerEncrypted } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientUploadPagesResponse{}, err + } + result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val } return result, nil } @@ -1106,40 +1110,44 @@ func (client *PageBlobClient) uploadPagesHandleResponse(resp *http.Response) (Pa // UploadPagesFromURL - The Upload Pages operation writes a range of pages to a page blob where the contents are read from // a URL // If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// sourceURL - Specify a URL to the copy source. -// sourceRange - Bytes of source data in the specified range. The length of this range should match the ContentLength header -// and x-ms-range/Range destination range header. -// contentLength - The length of the request. -// rangeParam - The range of bytes to which the source range would be written. The range should be 512 aligned and range-end -// is required. -// options - PageBlobClientUploadPagesFromURLOptions contains the optional parameters for the PageBlobClient.UploadPagesFromURL -// method. -// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method. -// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. -// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -// SequenceNumberAccessConditions - SequenceNumberAccessConditions contains a group of parameters for the PageBlobClient.UploadPages -// method. -// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL -// method. -func (client *PageBlobClient) UploadPagesFromURL(ctx context.Context, sourceURL string, sourceRange string, contentLength int64, rangeParam string, options *PageBlobClientUploadPagesFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (PageBlobClientUploadPagesFromURLResponse, error) { +// +// Generated from API version 2023-11-03 +// - sourceURL - Specify a URL to the copy source. +// - sourceRange - Bytes of source data in the specified range. The length of this range should match the ContentLength header +// and x-ms-range/Range destination range header. +// - contentLength - The length of the request. +// - rangeParam - The range of bytes to which the source range would be written. The range should be 512 aligned and range-end +// is required. 
+// - options - PageBlobClientUploadPagesFromURLOptions contains the optional parameters for the PageBlobClient.UploadPagesFromURL +// method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - SequenceNumberAccessConditions - SequenceNumberAccessConditions contains a group of parameters for the PageBlobClient.UploadPages +// method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// - SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL +// method. +func (client *PageBlobClient) UploadPagesFromURL(ctx context.Context, sourceURL string, sourceRange string, contentLength int64, rangeParam string, options *PageBlobClientUploadPagesFromURLOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, leaseAccessConditions *LeaseAccessConditions, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (PageBlobClientUploadPagesFromURLResponse, error) { + var err error req, err := client.uploadPagesFromURLCreateRequest(ctx, sourceURL, sourceRange, contentLength, rangeParam, options, cpkInfo, cpkScopeInfo, leaseAccessConditions, sequenceNumberAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions) if err != nil { return PageBlobClientUploadPagesFromURLResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return PageBlobClientUploadPagesFromURLResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return PageBlobClientUploadPagesFromURLResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return PageBlobClientUploadPagesFromURLResponse{}, err } - return client.uploadPagesFromURLHandleResponse(resp) + resp, err := client.uploadPagesFromURLHandleResponse(httpResp) + return resp, err } // uploadPagesFromURLCreateRequest creates the UploadPagesFromURL request. 
-func (client *PageBlobClient) uploadPagesFromURLCreateRequest(ctx context.Context, sourceURL string, sourceRange string, contentLength int64, rangeParam string, options *PageBlobClientUploadPagesFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) { +func (client *PageBlobClient) uploadPagesFromURLCreateRequest(ctx context.Context, sourceURL string, sourceRange string, contentLength int64, rangeParam string, options *PageBlobClientUploadPagesFromURLOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, leaseAccessConditions *LeaseAccessConditions, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -1186,10 +1194,10 @@ func (client *PageBlobClient) uploadPagesFromURLCreateRequest(ctx context.Contex req.Raw().Header["x-ms-if-sequence-number-eq"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -1201,10 +1209,10 @@ func (client *PageBlobClient) uploadPagesFromURLCreateRequest(ctx context.Contex req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { - req.Raw().Header["x-ms-source-if-modified-since"] = []string{sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { - req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} @@ -1212,7 +1220,7 @@ func (client *PageBlobClient) uploadPagesFromURLCreateRequest(ctx context.Contex if sourceModifiedAccessConditions != nil && 
sourceModifiedAccessConditions.SourceIfNoneMatch != nil { req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -1226,42 +1234,26 @@ func (client *PageBlobClient) uploadPagesFromURLCreateRequest(ctx context.Contex // uploadPagesFromURLHandleResponse handles the UploadPagesFromURL response. func (client *PageBlobClient) uploadPagesFromURLHandleResponse(resp *http.Response) (PageBlobClientUploadPagesFromURLResponse, error) { result := PageBlobClientUploadPagesFromURLResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return PageBlobClientUploadPagesFromURLResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) if err != nil { return PageBlobClientUploadPagesFromURLResponse{}, err } - result.ContentMD5 = contentMD5 + result.BlobSequenceNumber = &blobSequenceNumber } if val := resp.Header.Get("x-ms-content-crc64"); val != "" { - xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) + contentCRC64, err := base64.StdEncoding.DecodeString(val) if err != nil { return PageBlobClientUploadPagesFromURLResponse{}, err } - result.XMSContentCRC64 = xMSContentCRC64 + result.ContentCRC64 = contentCRC64 } - if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { - blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) if err != nil { return PageBlobClientUploadPagesFromURLResponse{}, err } - result.BlobSequenceNumber = &blobSequenceNumber - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val + result.ContentMD5 = contentMD5 } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) @@ -1270,6 +1262,15 @@ func (client *PageBlobClient) uploadPagesFromURLHandleResponse(resp *http.Respon } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { isServerEncrypted, err := strconv.ParseBool(val) if err != nil { @@ -1277,11 +1278,18 @@ func (client *PageBlobClient) uploadPagesFromURLHandleResponse(resp *http.Respon } result.IsServerEncrypted = &isServerEncrypted } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientUploadPagesFromURLResponse{}, err + } + 
result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val } return result, nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_response_types.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_response_types.go index 3d2d5d2f953e3..738d23c8f19e8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_response_types.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_response_types.go @@ -3,9 +3,8 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. // Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. package generated @@ -23,6 +22,9 @@ type AppendBlobClientAppendBlockFromURLResponse struct { // BlobCommittedBlockCount contains the information returned from the x-ms-blob-committed-block-count header response. BlobCommittedBlockCount *int32 + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + ContentCRC64 []byte + // ContentMD5 contains the information returned from the Content-MD5 header response. ContentMD5 []byte @@ -49,9 +51,6 @@ type AppendBlobClientAppendBlockFromURLResponse struct { // Version contains the information returned from the x-ms-version header response. Version *string - - // XMSContentCRC64 contains the information returned from the x-ms-content-crc64 header response. - XMSContentCRC64 []byte } // AppendBlobClientAppendBlockResponse contains the response from method AppendBlobClient.AppendBlock. @@ -65,6 +64,9 @@ type AppendBlobClientAppendBlockResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + ContentCRC64 []byte + // ContentMD5 contains the information returned from the Content-MD5 header response. ContentMD5 []byte @@ -91,9 +93,6 @@ type AppendBlobClientAppendBlockResponse struct { // Version contains the information returned from the x-ms-version header response. Version *string - - // XMSContentCRC64 contains the information returned from the x-ms-content-crc64 header response. - XMSContentCRC64 []byte } // AppendBlobClientCreateResponse contains the response from method AppendBlobClient.Create. @@ -248,6 +247,9 @@ type BlobClientCopyFromURLResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + ContentCRC64 []byte + // ContentMD5 contains the information returned from the Content-MD5 header response. ContentMD5 []byte @@ -263,6 +265,9 @@ type BlobClientCopyFromURLResponse struct { // ETag contains the information returned from the ETag header response. 
ETag *azcore.ETag + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + // LastModified contains the information returned from the Last-Modified header response. LastModified *time.Time @@ -274,9 +279,6 @@ type BlobClientCopyFromURLResponse struct { // VersionID contains the information returned from the x-ms-version-id header response. VersionID *string - - // XMSContentCRC64 contains the information returned from the x-ms-content-crc64 header response. - XMSContentCRC64 []byte } // BlobClientCreateSnapshotResponse contains the response from method BlobClient.CreateSnapshot. @@ -407,6 +409,9 @@ type BlobClientDownloadResponse struct { // CopyStatusDescription contains the information returned from the x-ms-copy-status-description header response. CopyStatusDescription *string + // CreationTime contains the information returned from the x-ms-creation-time header response. + CreationTime *time.Time + // Date contains the information returned from the Date header response. Date *time.Time @@ -456,13 +461,13 @@ type BlobClientDownloadResponse struct { LegalHold *bool // Metadata contains the information returned from the x-ms-meta header response. - Metadata map[string]string + Metadata map[string]*string // ObjectReplicationPolicyID contains the information returned from the x-ms-or-policy-id header response. ObjectReplicationPolicyID *string // ObjectReplicationRules contains the information returned from the x-ms-or header response. - ObjectReplicationRules map[string]string + ObjectReplicationRules map[string]*string // RequestID contains the information returned from the x-ms-request-id header response. RequestID *string @@ -624,13 +629,13 @@ type BlobClientGetPropertiesResponse struct { LegalHold *bool // Metadata contains the information returned from the x-ms-meta header response. - Metadata map[string]string + Metadata map[string]*string // ObjectReplicationPolicyID contains the information returned from the x-ms-or-policy-id header response. ObjectReplicationPolicyID *string // ObjectReplicationRules contains the information returned from the x-ms-or header response. - ObjectReplicationRules map[string]string + ObjectReplicationRules map[string]*string // RehydratePriority contains the information returned from the x-ms-rehydrate-priority header response. RehydratePriority *string @@ -650,18 +655,20 @@ type BlobClientGetPropertiesResponse struct { // BlobClientGetTagsResponse contains the response from method BlobClient.GetTags. type BlobClientGetTagsResponse struct { + // Blob tags BlobTags + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string `xml:"ClientRequestID"` + ClientRequestID *string // Date contains the information returned from the Date header response. - Date *time.Time `xml:"Date"` + Date *time.Time // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string `xml:"RequestID"` + RequestID *string // Version contains the information returned from the x-ms-version header response. - Version *string `xml:"Version"` + Version *string } // BlobClientQueryResponse contains the response from method BlobClient.Query. @@ -760,7 +767,7 @@ type BlobClientQueryResponse struct { LeaseStatus *LeaseStatusType // Metadata contains the information returned from the x-ms-meta header response. 
- Metadata map[string]string + Metadata map[string]*string // RequestID contains the information returned from the x-ms-request-id header response. RequestID *string @@ -1008,6 +1015,9 @@ type BlockBlobClientCommitBlockListResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + ContentCRC64 []byte + // ContentMD5 contains the information returned from the Content-MD5 header response. ContentMD5 []byte @@ -1037,37 +1047,35 @@ type BlockBlobClientCommitBlockListResponse struct { // VersionID contains the information returned from the x-ms-version-id header response. VersionID *string - - // XMSContentCRC64 contains the information returned from the x-ms-content-crc64 header response. - XMSContentCRC64 []byte } // BlockBlobClientGetBlockListResponse contains the response from method BlockBlobClient.GetBlockList. type BlockBlobClientGetBlockListResponse struct { BlockList + // BlobContentLength contains the information returned from the x-ms-blob-content-length header response. - BlobContentLength *int64 `xml:"BlobContentLength"` + BlobContentLength *int64 // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string `xml:"ClientRequestID"` + ClientRequestID *string // ContentType contains the information returned from the Content-Type header response. - ContentType *string `xml:"ContentType"` + ContentType *string // Date contains the information returned from the Date header response. - Date *time.Time `xml:"Date"` + Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag `xml:"ETag"` + ETag *azcore.ETag // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time `xml:"LastModified"` + LastModified *time.Time // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string `xml:"RequestID"` + RequestID *string // Version contains the information returned from the x-ms-version header response. - Version *string `xml:"Version"` + Version *string } // BlockBlobClientPutBlobFromURLResponse contains the response from method BlockBlobClient.PutBlobFromURL. @@ -1111,6 +1119,9 @@ type BlockBlobClientStageBlockFromURLResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + ContentCRC64 []byte + // ContentMD5 contains the information returned from the Content-MD5 header response. ContentMD5 []byte @@ -1131,9 +1142,6 @@ type BlockBlobClientStageBlockFromURLResponse struct { // Version contains the information returned from the x-ms-version header response. Version *string - - // XMSContentCRC64 contains the information returned from the x-ms-content-crc64 header response. - XMSContentCRC64 []byte } // BlockBlobClientStageBlockResponse contains the response from method BlockBlobClient.StageBlock. @@ -1141,6 +1149,9 @@ type BlockBlobClientStageBlockResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. 
+ ContentCRC64 []byte + // ContentMD5 contains the information returned from the Content-MD5 header response. ContentMD5 []byte @@ -1161,9 +1172,6 @@ type BlockBlobClientStageBlockResponse struct { // Version contains the information returned from the x-ms-version header response. Version *string - - // XMSContentCRC64 contains the information returned from the x-ms-content-crc64 header response. - XMSContentCRC64 []byte } // BlockBlobClientUploadResponse contains the response from method BlockBlobClient.Upload. @@ -1310,31 +1318,49 @@ type ContainerClientDeleteResponse struct { Version *string } +// ContainerClientFilterBlobsResponse contains the response from method ContainerClient.FilterBlobs. +type ContainerClientFilterBlobsResponse struct { + // The result of a Filter Blobs API call + FilterBlobSegment + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + // ContainerClientGetAccessPolicyResponse contains the response from method ContainerClient.GetAccessPolicy. type ContainerClientGetAccessPolicyResponse struct { // BlobPublicAccess contains the information returned from the x-ms-blob-public-access header response. - BlobPublicAccess *PublicAccessType `xml:"BlobPublicAccess"` + BlobPublicAccess *PublicAccessType // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string `xml:"ClientRequestID"` + ClientRequestID *string // Date contains the information returned from the Date header response. - Date *time.Time `xml:"Date"` + Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag `xml:"ETag"` + ETag *azcore.ETag // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time `xml:"LastModified"` + LastModified *time.Time // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string `xml:"RequestID"` + RequestID *string // a collection of signed identifiers SignedIdentifiers []*SignedIdentifier `xml:"SignedIdentifier"` // Version contains the information returned from the x-ms-version header response. - Version *string `xml:"Version"` + Version *string } // ContainerClientGetAccountInfoResponse contains the response from method ContainerClient.GetAccountInfo. @@ -1401,7 +1427,7 @@ type ContainerClientGetPropertiesResponse struct { LeaseStatus *LeaseStatusType // Metadata contains the information returned from the x-ms-meta header response. - Metadata map[string]string + Metadata map[string]*string // RequestID contains the information returned from the x-ms-request-id header response. RequestID *string @@ -1410,42 +1436,46 @@ type ContainerClientGetPropertiesResponse struct { Version *string } -// ContainerClientListBlobFlatSegmentResponse contains the response from method ContainerClient.ListBlobFlatSegment. +// ContainerClientListBlobFlatSegmentResponse contains the response from method ContainerClient.NewListBlobFlatSegmentPager. 
type ContainerClientListBlobFlatSegmentResponse struct { + // An enumeration of blobs ListBlobsFlatSegmentResponse + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string `xml:"ClientRequestID"` + ClientRequestID *string // ContentType contains the information returned from the Content-Type header response. - ContentType *string `xml:"ContentType"` + ContentType *string // Date contains the information returned from the Date header response. - Date *time.Time `xml:"Date"` + Date *time.Time // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string `xml:"RequestID"` + RequestID *string // Version contains the information returned from the x-ms-version header response. - Version *string `xml:"Version"` + Version *string } -// ContainerClientListBlobHierarchySegmentResponse contains the response from method ContainerClient.ListBlobHierarchySegment. +// ContainerClientListBlobHierarchySegmentResponse contains the response from method ContainerClient.NewListBlobHierarchySegmentPager. type ContainerClientListBlobHierarchySegmentResponse struct { + // An enumeration of blobs ListBlobsHierarchySegmentResponse + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string `xml:"ClientRequestID"` + ClientRequestID *string // ContentType contains the information returned from the Content-Type header response. - ContentType *string `xml:"ContentType"` + ContentType *string // Date contains the information returned from the Date header response. - Date *time.Time `xml:"Date"` + Date *time.Time // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string `xml:"RequestID"` + RequestID *string // Version contains the information returned from the x-ms-version header response. - Version *string `xml:"Version"` + Version *string } // ContainerClientReleaseLeaseResponse contains the response from method ContainerClient.ReleaseLease. @@ -1588,6 +1618,9 @@ type PageBlobClientClearPagesResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + ContentCRC64 []byte + // ContentMD5 contains the information returned from the Content-MD5 header response. ContentMD5 []byte @@ -1605,9 +1638,6 @@ type PageBlobClientClearPagesResponse struct { // Version contains the information returned from the x-ms-version header response. Version *string - - // XMSContentCRC64 contains the information returned from the x-ms-content-crc64 header response. - XMSContentCRC64 []byte } // PageBlobClientCopyIncrementalResponse contains the response from method PageBlobClient.CopyIncremental. @@ -1673,54 +1703,58 @@ type PageBlobClientCreateResponse struct { VersionID *string } -// PageBlobClientGetPageRangesDiffResponse contains the response from method PageBlobClient.GetPageRangesDiff. +// PageBlobClientGetPageRangesDiffResponse contains the response from method PageBlobClient.NewGetPageRangesDiffPager. type PageBlobClientGetPageRangesDiffResponse struct { + // the list of pages PageList + // BlobContentLength contains the information returned from the x-ms-blob-content-length header response. 
- BlobContentLength *int64 `xml:"BlobContentLength"` + BlobContentLength *int64 // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string `xml:"ClientRequestID"` + ClientRequestID *string // Date contains the information returned from the Date header response. - Date *time.Time `xml:"Date"` + Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag `xml:"ETag"` + ETag *azcore.ETag // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time `xml:"LastModified"` + LastModified *time.Time // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string `xml:"RequestID"` + RequestID *string // Version contains the information returned from the x-ms-version header response. - Version *string `xml:"Version"` + Version *string } -// PageBlobClientGetPageRangesResponse contains the response from method PageBlobClient.GetPageRanges. +// PageBlobClientGetPageRangesResponse contains the response from method PageBlobClient.NewGetPageRangesPager. type PageBlobClientGetPageRangesResponse struct { + // the list of pages PageList + // BlobContentLength contains the information returned from the x-ms-blob-content-length header response. - BlobContentLength *int64 `xml:"BlobContentLength"` + BlobContentLength *int64 // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string `xml:"ClientRequestID"` + ClientRequestID *string // Date contains the information returned from the Date header response. - Date *time.Time `xml:"Date"` + Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag `xml:"ETag"` + ETag *azcore.ETag // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time `xml:"LastModified"` + LastModified *time.Time // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string `xml:"RequestID"` + RequestID *string // Version contains the information returned from the x-ms-version header response. - Version *string `xml:"Version"` + Version *string } // PageBlobClientResizeResponse contains the response from method PageBlobClient.Resize. @@ -1776,6 +1810,9 @@ type PageBlobClientUploadPagesFromURLResponse struct { // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. BlobSequenceNumber *int64 + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + ContentCRC64 []byte + // ContentMD5 contains the information returned from the Content-MD5 header response. ContentMD5 []byte @@ -1802,9 +1839,6 @@ type PageBlobClientUploadPagesFromURLResponse struct { // Version contains the information returned from the x-ms-version header response. Version *string - - // XMSContentCRC64 contains the information returned from the x-ms-content-crc64 header response. - XMSContentCRC64 []byte } // PageBlobClientUploadPagesResponse contains the response from method PageBlobClient.UploadPages. @@ -1815,6 +1849,9 @@ type PageBlobClientUploadPagesResponse struct { // ClientRequestID contains the information returned from the x-ms-client-request-id header response. ClientRequestID *string + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. 
+ ContentCRC64 []byte + // ContentMD5 contains the information returned from the Content-MD5 header response. ContentMD5 []byte @@ -1841,25 +1878,24 @@ type PageBlobClientUploadPagesResponse struct { // Version contains the information returned from the x-ms-version header response. Version *string - - // XMSContentCRC64 contains the information returned from the x-ms-content-crc64 header response. - XMSContentCRC64 []byte } // ServiceClientFilterBlobsResponse contains the response from method ServiceClient.FilterBlobs. type ServiceClientFilterBlobsResponse struct { + // The result of a Filter Blobs API call FilterBlobSegment + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string `xml:"ClientRequestID"` + ClientRequestID *string // Date contains the information returned from the Date header response. - Date *time.Time `xml:"Date"` + Date *time.Time // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string `xml:"RequestID"` + RequestID *string // Version contains the information returned from the x-ms-version header response. - Version *string `xml:"Version"` + Version *string } // ServiceClientGetAccountInfoResponse contains the response from method ServiceClient.GetAccountInfo. @@ -1888,60 +1924,68 @@ type ServiceClientGetAccountInfoResponse struct { // ServiceClientGetPropertiesResponse contains the response from method ServiceClient.GetProperties. type ServiceClientGetPropertiesResponse struct { + // Storage Service Properties. StorageServiceProperties + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string `xml:"ClientRequestID"` + ClientRequestID *string // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string `xml:"RequestID"` + RequestID *string // Version contains the information returned from the x-ms-version header response. - Version *string `xml:"Version"` + Version *string } // ServiceClientGetStatisticsResponse contains the response from method ServiceClient.GetStatistics. type ServiceClientGetStatisticsResponse struct { + // Stats for the storage service. StorageServiceStats + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string `xml:"ClientRequestID"` + ClientRequestID *string // Date contains the information returned from the Date header response. - Date *time.Time `xml:"Date"` + Date *time.Time // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string `xml:"RequestID"` + RequestID *string // Version contains the information returned from the x-ms-version header response. - Version *string `xml:"Version"` + Version *string } // ServiceClientGetUserDelegationKeyResponse contains the response from method ServiceClient.GetUserDelegationKey. type ServiceClientGetUserDelegationKeyResponse struct { + // A user delegation key UserDelegationKey + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string `xml:"ClientRequestID"` + ClientRequestID *string // Date contains the information returned from the Date header response. - Date *time.Time `xml:"Date"` + Date *time.Time // RequestID contains the information returned from the x-ms-request-id header response. 
- RequestID *string `xml:"RequestID"` + RequestID *string // Version contains the information returned from the x-ms-version header response. - Version *string `xml:"Version"` + Version *string } -// ServiceClientListContainersSegmentResponse contains the response from method ServiceClient.ListContainersSegment. +// ServiceClientListContainersSegmentResponse contains the response from method ServiceClient.NewListContainersSegmentPager. type ServiceClientListContainersSegmentResponse struct { + // An enumeration of containers ListContainersSegmentResponse + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string `xml:"ClientRequestID"` + ClientRequestID *string // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string `xml:"RequestID"` + RequestID *string // Version contains the information returned from the x-ms-version header response. - Version *string `xml:"Version"` + Version *string } // ServiceClientSetPropertiesResponse contains the response from method ServiceClient.SetProperties. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go index 1cb779d848128..c792fbf094bfb 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go @@ -3,15 +3,15 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. // Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. package generated import ( "context" "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "io" @@ -22,46 +22,40 @@ import ( ) // ServiceClient contains the methods for the Service group. -// Don't use this type directly, use NewServiceClient() instead. +// Don't use this type directly, use a constructor function instead. type ServiceClient struct { + internal *azcore.Client endpoint string - pl runtime.Pipeline -} - -// NewServiceClient creates a new instance of ServiceClient with the specified values. -// endpoint - The URL of the service account, container, or blob that is the target of the desired operation. -// pl - the pipeline used for sending requests and handling responses. -func NewServiceClient(endpoint string, pl runtime.Pipeline) *ServiceClient { - client := &ServiceClient{ - endpoint: endpoint, - pl: pl, - } - return client } // FilterBlobs - The Filter Blobs operation enables callers to list blobs across all containers whose tags match a given search // expression. Filter blobs searches across all containers within a storage account but can // be scoped within the expression to a single container. // If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// options - ServiceClientFilterBlobsOptions contains the optional parameters for the ServiceClient.FilterBlobs method. 
-func (client *ServiceClient) FilterBlobs(ctx context.Context, options *ServiceClientFilterBlobsOptions) (ServiceClientFilterBlobsResponse, error) { - req, err := client.filterBlobsCreateRequest(ctx, options) +// +// Generated from API version 2023-11-03 +// - where - Filters the results to return only to return only blobs whose tags match the specified expression. +// - options - ServiceClientFilterBlobsOptions contains the optional parameters for the ServiceClient.FilterBlobs method. +func (client *ServiceClient) FilterBlobs(ctx context.Context, where string, options *ServiceClientFilterBlobsOptions) (ServiceClientFilterBlobsResponse, error) { + var err error + req, err := client.filterBlobsCreateRequest(ctx, where, options) if err != nil { return ServiceClientFilterBlobsResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ServiceClientFilterBlobsResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ServiceClientFilterBlobsResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ServiceClientFilterBlobsResponse{}, err } - return client.filterBlobsHandleResponse(resp) + resp, err := client.filterBlobsHandleResponse(httpResp) + return resp, err } // filterBlobsCreateRequest creates the FilterBlobs request. -func (client *ServiceClient) filterBlobsCreateRequest(ctx context.Context, options *ServiceClientFilterBlobsOptions) (*policy.Request, error) { +func (client *ServiceClient) filterBlobsCreateRequest(ctx context.Context, where string, options *ServiceClientFilterBlobsOptions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) if err != nil { return nil, err @@ -71,17 +65,18 @@ func (client *ServiceClient) filterBlobsCreateRequest(ctx context.Context, optio if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } - if options != nil && options.Where != nil { - reqQP.Set("where", *options.Where) - } + reqQP.Set("where", where) if options != nil && options.Marker != nil { reqQP.Set("marker", *options.Marker) } if options != nil && options.Maxresults != nil { reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.Include != nil { + reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ",")) + } + req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1) + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -95,12 +90,6 @@ func (client *ServiceClient) filterBlobsHandleResponse(resp *http.Response) (Ser if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -108,6 +97,12 @@ func (client *ServiceClient) filterBlobsHandleResponse(resp *http.Response) (Ser } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + 
result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } if err := runtime.UnmarshalAsXML(resp, &result.FilterBlobSegment); err != nil { return ServiceClientFilterBlobsResponse{}, err } @@ -116,21 +111,25 @@ func (client *ServiceClient) filterBlobsHandleResponse(resp *http.Response) (Ser // GetAccountInfo - Returns the sku name and account kind // If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// options - ServiceClientGetAccountInfoOptions contains the optional parameters for the ServiceClient.GetAccountInfo method. +// +// Generated from API version 2023-11-03 +// - options - ServiceClientGetAccountInfoOptions contains the optional parameters for the ServiceClient.GetAccountInfo method. func (client *ServiceClient) GetAccountInfo(ctx context.Context, options *ServiceClientGetAccountInfoOptions) (ServiceClientGetAccountInfoResponse, error) { + var err error req, err := client.getAccountInfoCreateRequest(ctx, options) if err != nil { return ServiceClientGetAccountInfoResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ServiceClientGetAccountInfoResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ServiceClientGetAccountInfoResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ServiceClientGetAccountInfoResponse{}, err } - return client.getAccountInfoHandleResponse(resp) + resp, err := client.getAccountInfoHandleResponse(httpResp) + return resp, err } // getAccountInfoCreateRequest creates the GetAccountInfo request. @@ -143,7 +142,7 @@ func (client *ServiceClient) getAccountInfoCreateRequest(ctx context.Context, op reqQP.Set("restype", "account") reqQP.Set("comp", "properties") req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } @@ -151,15 +150,12 @@ func (client *ServiceClient) getAccountInfoCreateRequest(ctx context.Context, op // getAccountInfoHandleResponse handles the GetAccountInfo response. 
func (client *ServiceClient) getAccountInfoHandleResponse(resp *http.Response) (ServiceClientGetAccountInfoResponse, error) { result := ServiceClientGetAccountInfoResponse{} + if val := resp.Header.Get("x-ms-account-kind"); val != "" { + result.AccountKind = (*AccountKind)(&val) + } if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -167,12 +163,6 @@ func (client *ServiceClient) getAccountInfoHandleResponse(resp *http.Response) ( } result.Date = &date } - if val := resp.Header.Get("x-ms-sku-name"); val != "" { - result.SKUName = (*SKUName)(&val) - } - if val := resp.Header.Get("x-ms-account-kind"); val != "" { - result.AccountKind = (*AccountKind)(&val) - } if val := resp.Header.Get("x-ms-is-hns-enabled"); val != "" { isHierarchicalNamespaceEnabled, err := strconv.ParseBool(val) if err != nil { @@ -180,27 +170,40 @@ func (client *ServiceClient) getAccountInfoHandleResponse(resp *http.Response) ( } result.IsHierarchicalNamespaceEnabled = &isHierarchicalNamespaceEnabled } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-sku-name"); val != "" { + result.SKUName = (*SKUName)(&val) + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } // GetProperties - gets the properties of a storage account's Blob service, including properties for Storage Analytics and // CORS (Cross-Origin Resource Sharing) rules. // If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// options - ServiceClientGetPropertiesOptions contains the optional parameters for the ServiceClient.GetProperties method. +// +// Generated from API version 2023-11-03 +// - options - ServiceClientGetPropertiesOptions contains the optional parameters for the ServiceClient.GetProperties method. func (client *ServiceClient) GetProperties(ctx context.Context, options *ServiceClientGetPropertiesOptions) (ServiceClientGetPropertiesResponse, error) { + var err error req, err := client.getPropertiesCreateRequest(ctx, options) if err != nil { return ServiceClientGetPropertiesResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ServiceClientGetPropertiesResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ServiceClientGetPropertiesResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ServiceClientGetPropertiesResponse{}, err } - return client.getPropertiesHandleResponse(resp) + resp, err := client.getPropertiesHandleResponse(httpResp) + return resp, err } // getPropertiesCreateRequest creates the GetProperties request. 
@@ -216,7 +219,7 @@ func (client *ServiceClient) getPropertiesCreateRequest(ctx context.Context, opt reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -245,21 +248,25 @@ func (client *ServiceClient) getPropertiesHandleResponse(resp *http.Response) (S // GetStatistics - Retrieves statistics related to replication for the Blob service. It is only available on the secondary // location endpoint when read-access geo-redundant replication is enabled for the storage account. // If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// options - ServiceClientGetStatisticsOptions contains the optional parameters for the ServiceClient.GetStatistics method. +// +// Generated from API version 2023-11-03 +// - options - ServiceClientGetStatisticsOptions contains the optional parameters for the ServiceClient.GetStatistics method. func (client *ServiceClient) GetStatistics(ctx context.Context, options *ServiceClientGetStatisticsOptions) (ServiceClientGetStatisticsResponse, error) { + var err error req, err := client.getStatisticsCreateRequest(ctx, options) if err != nil { return ServiceClientGetStatisticsResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ServiceClientGetStatisticsResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ServiceClientGetStatisticsResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ServiceClientGetStatisticsResponse{}, err } - return client.getStatisticsHandleResponse(resp) + resp, err := client.getStatisticsHandleResponse(httpResp) + return resp, err } // getStatisticsCreateRequest creates the GetStatistics request. 
@@ -275,7 +282,7 @@ func (client *ServiceClient) getStatisticsCreateRequest(ctx context.Context, opt reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -289,12 +296,6 @@ func (client *ServiceClient) getStatisticsHandleResponse(resp *http.Response) (S if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -302,6 +303,12 @@ func (client *ServiceClient) getStatisticsHandleResponse(resp *http.Response) (S } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } if err := runtime.UnmarshalAsXML(resp, &result.StorageServiceStats); err != nil { return ServiceClientGetStatisticsResponse{}, err } @@ -311,23 +318,27 @@ func (client *ServiceClient) getStatisticsHandleResponse(resp *http.Response) (S // GetUserDelegationKey - Retrieves a user delegation key for the Blob service. This is only a valid operation when using // bearer token authentication. // If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// keyInfo - Key information -// options - ServiceClientGetUserDelegationKeyOptions contains the optional parameters for the ServiceClient.GetUserDelegationKey -// method. +// +// Generated from API version 2023-11-03 +// - keyInfo - Key information +// - options - ServiceClientGetUserDelegationKeyOptions contains the optional parameters for the ServiceClient.GetUserDelegationKey +// method. func (client *ServiceClient) GetUserDelegationKey(ctx context.Context, keyInfo KeyInfo, options *ServiceClientGetUserDelegationKeyOptions) (ServiceClientGetUserDelegationKeyResponse, error) { + var err error req, err := client.getUserDelegationKeyCreateRequest(ctx, keyInfo, options) if err != nil { return ServiceClientGetUserDelegationKeyResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ServiceClientGetUserDelegationKeyResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ServiceClientGetUserDelegationKeyResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ServiceClientGetUserDelegationKeyResponse{}, err } - return client.getUserDelegationKeyHandleResponse(resp) + resp, err := client.getUserDelegationKeyHandleResponse(httpResp) + return resp, err } // getUserDelegationKeyCreateRequest creates the GetUserDelegationKey request. 
@@ -343,12 +354,15 @@ func (client *ServiceClient) getUserDelegationKeyCreateRequest(ctx context.Conte reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } req.Raw().Header["Accept"] = []string{"application/xml"} - return req, runtime.MarshalAsXML(req, keyInfo) + if err := runtime.MarshalAsXML(req, keyInfo); err != nil { + return nil, err + } + return req, nil } // getUserDelegationKeyHandleResponse handles the GetUserDelegationKey response. @@ -357,12 +371,6 @@ func (client *ServiceClient) getUserDelegationKeyHandleResponse(resp *http.Respo if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -370,6 +378,12 @@ func (client *ServiceClient) getUserDelegationKeyHandleResponse(resp *http.Respo } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } if err := runtime.UnmarshalAsXML(resp, &result.UserDelegationKey); err != nil { return ServiceClientGetUserDelegationKeyResponse{}, err } @@ -378,10 +392,11 @@ func (client *ServiceClient) getUserDelegationKeyHandleResponse(resp *http.Respo // NewListContainersSegmentPager - The List Containers Segment operation returns a list of the containers under the specified // account -// If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// options - ServiceClientListContainersSegmentOptions contains the optional parameters for the ServiceClient.ListContainersSegment -// method. +// +// Generated from API version 2023-11-03 +// - options - ServiceClientListContainersSegmentOptions contains the optional parameters for the ServiceClient.NewListContainersSegmentPager +// method. +// // listContainersSegmentCreateRequest creates the ListContainersSegment request. func (client *ServiceClient) ListContainersSegmentCreateRequest(ctx context.Context, options *ServiceClientListContainersSegmentOptions) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) @@ -406,7 +421,7 @@ func (client *ServiceClient) ListContainersSegmentCreateRequest(ctx context.Cont reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -435,22 +450,26 @@ func (client *ServiceClient) ListContainersSegmentHandleResponse(resp *http.Resp // SetProperties - Sets properties for a storage account's Blob service endpoint, including properties for Storage Analytics // and CORS (Cross-Origin Resource Sharing) rules // If the operation fails it returns an *azcore.ResponseError type. 
-// Generated from API version 2020-10-02 -// storageServiceProperties - The StorageService properties. -// options - ServiceClientSetPropertiesOptions contains the optional parameters for the ServiceClient.SetProperties method. +// +// Generated from API version 2023-11-03 +// - storageServiceProperties - The StorageService properties. +// - options - ServiceClientSetPropertiesOptions contains the optional parameters for the ServiceClient.SetProperties method. func (client *ServiceClient) SetProperties(ctx context.Context, storageServiceProperties StorageServiceProperties, options *ServiceClientSetPropertiesOptions) (ServiceClientSetPropertiesResponse, error) { + var err error req, err := client.setPropertiesCreateRequest(ctx, storageServiceProperties, options) if err != nil { return ServiceClientSetPropertiesResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ServiceClientSetPropertiesResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return ServiceClientSetPropertiesResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return ServiceClientSetPropertiesResponse{}, err } - return client.setPropertiesHandleResponse(resp) + resp, err := client.setPropertiesHandleResponse(httpResp) + return resp, err } // setPropertiesCreateRequest creates the SetProperties request. @@ -466,12 +485,15 @@ func (client *ServiceClient) setPropertiesCreateRequest(ctx context.Context, sto reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } req.Raw().Header["Accept"] = []string{"application/xml"} - return req, runtime.MarshalAsXML(req, storageServiceProperties) + if err := runtime.MarshalAsXML(req, storageServiceProperties); err != nil { + return nil, err + } + return req, nil } // setPropertiesHandleResponse handles the SetProperties response. @@ -491,25 +513,29 @@ func (client *ServiceClient) setPropertiesHandleResponse(resp *http.Response) (S // SubmitBatch - The Batch operation allows multiple API calls to be embedded into a single HTTP request. // If the operation fails it returns an *azcore.ResponseError type. -// Generated from API version 2020-10-02 -// contentLength - The length of the request. -// multipartContentType - Required. The value of this header must be multipart/mixed with a batch boundary. Example header -// value: multipart/mixed; boundary=batch_ -// body - Initial data -// options - ServiceClientSubmitBatchOptions contains the optional parameters for the ServiceClient.SubmitBatch method. +// +// Generated from API version 2023-11-03 +// - contentLength - The length of the request. +// - multipartContentType - Required. The value of this header must be multipart/mixed with a batch boundary. Example header +// value: multipart/mixed; boundary=batch_ +// - body - Initial data +// - options - ServiceClientSubmitBatchOptions contains the optional parameters for the ServiceClient.SubmitBatch method. 
func (client *ServiceClient) SubmitBatch(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *ServiceClientSubmitBatchOptions) (ServiceClientSubmitBatchResponse, error) { + var err error req, err := client.submitBatchCreateRequest(ctx, contentLength, multipartContentType, body, options) if err != nil { return ServiceClientSubmitBatchResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ServiceClientSubmitBatchResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ServiceClientSubmitBatchResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return ServiceClientSubmitBatchResponse{}, err } - return client.submitBatchHandleResponse(resp) + resp, err := client.submitBatchHandleResponse(httpResp) + return resp, err } // submitBatchCreateRequest creates the SubmitBatch request. @@ -527,12 +553,15 @@ func (client *ServiceClient) submitBatchCreateRequest(ctx context.Context, conte runtime.SkipBodyDownload(req) req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} req.Raw().Header["Content-Type"] = []string{multipartContentType} - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } req.Raw().Header["Accept"] = []string{"application/xml"} - return req, req.SetBody(body, "application/xml") + if err := req.SetBody(body, multipartContentType); err != nil { + return nil, err + } + return req, nil } // submitBatchHandleResponse handles the SubmitBatch response. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc1123.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc1123.go index 4b4d51aa39944..586650329724f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc1123.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc1123.go @@ -3,9 +3,8 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. // Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. 
package generated @@ -15,29 +14,29 @@ import ( ) const ( - rfc1123JSON = `"` + time.RFC1123 + `"` + dateTimeRFC1123JSON = `"` + time.RFC1123 + `"` ) -type timeRFC1123 time.Time +type dateTimeRFC1123 time.Time -func (t timeRFC1123) MarshalJSON() ([]byte, error) { - b := []byte(time.Time(t).Format(rfc1123JSON)) +func (t dateTimeRFC1123) MarshalJSON() ([]byte, error) { + b := []byte(time.Time(t).Format(dateTimeRFC1123JSON)) return b, nil } -func (t timeRFC1123) MarshalText() ([]byte, error) { +func (t dateTimeRFC1123) MarshalText() ([]byte, error) { b := []byte(time.Time(t).Format(time.RFC1123)) return b, nil } -func (t *timeRFC1123) UnmarshalJSON(data []byte) error { - p, err := time.Parse(rfc1123JSON, strings.ToUpper(string(data))) - *t = timeRFC1123(p) +func (t *dateTimeRFC1123) UnmarshalJSON(data []byte) error { + p, err := time.Parse(dateTimeRFC1123JSON, strings.ToUpper(string(data))) + *t = dateTimeRFC1123(p) return err } -func (t *timeRFC1123) UnmarshalText(data []byte) error { +func (t *dateTimeRFC1123) UnmarshalText(data []byte) error { p, err := time.Parse(time.RFC1123, string(data)) - *t = timeRFC1123(p) + *t = dateTimeRFC1123(p) return err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc3339.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc3339.go index 1ce9d621164ed..82b370133fac4 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc3339.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc3339.go @@ -3,9 +3,8 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. // Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. package generated @@ -15,45 +14,45 @@ import ( "time" ) -const ( - utcLayoutJSON = `"2006-01-02T15:04:05.999999999"` - utcLayout = "2006-01-02T15:04:05.999999999" - rfc3339JSON = `"` + time.RFC3339Nano + `"` -) - // Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases. 
var tzOffsetRegex = regexp.MustCompile(`(Z|z|\+|-)(\d+:\d+)*"*$`) -type timeRFC3339 time.Time +const ( + utcDateTimeJSON = `"2006-01-02T15:04:05.999999999"` + utcDateTime = "2006-01-02T15:04:05.999999999" + dateTimeJSON = `"` + time.RFC3339Nano + `"` +) + +type dateTimeRFC3339 time.Time -func (t timeRFC3339) MarshalJSON() (json []byte, err error) { +func (t dateTimeRFC3339) MarshalJSON() ([]byte, error) { tt := time.Time(t) return tt.MarshalJSON() } -func (t timeRFC3339) MarshalText() (text []byte, err error) { +func (t dateTimeRFC3339) MarshalText() ([]byte, error) { tt := time.Time(t) return tt.MarshalText() } -func (t *timeRFC3339) UnmarshalJSON(data []byte) error { - layout := utcLayoutJSON +func (t *dateTimeRFC3339) UnmarshalJSON(data []byte) error { + layout := utcDateTimeJSON if tzOffsetRegex.Match(data) { - layout = rfc3339JSON + layout = dateTimeJSON } return t.Parse(layout, string(data)) } -func (t *timeRFC3339) UnmarshalText(data []byte) (err error) { - layout := utcLayout +func (t *dateTimeRFC3339) UnmarshalText(data []byte) error { + layout := utcDateTime if tzOffsetRegex.Match(data) { layout = time.RFC3339Nano } return t.Parse(layout, string(data)) } -func (t *timeRFC3339) Parse(layout, value string) error { +func (t *dateTimeRFC3339) Parse(layout, value string) error { p, err := time.Parse(layout, strings.ToUpper(value)) - *t = timeRFC3339(p) + *t = dateTimeRFC3339(p) return err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_xml_helper.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_xml_helper.go index 144ea18e1abae..1bd0e4de05a88 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_xml_helper.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_xml_helper.go @@ -3,14 +3,16 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. // Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. package generated import ( "encoding/xml" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "io" "strings" ) @@ -19,22 +21,32 @@ type additionalProperties map[string]*string // UnmarshalXML implements the xml.Unmarshaler interface for additionalProperties. 
func (ap *additionalProperties) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { tokName := "" - for t, err := d.Token(); err == nil; t, err = d.Token() { + tokValue := "" + for { + t, err := d.Token() + if errors.Is(err, io.EOF) { + break + } else if err != nil { + return err + } switch tt := t.(type) { case xml.StartElement: tokName = strings.ToLower(tt.Name.Local) - break + tokValue = "" case xml.CharData: + if tokName == "" { + continue + } + tokValue = string(tt) + case xml.EndElement: if tokName == "" { continue } if *ap == nil { *ap = additionalProperties{} } - s := string(tt) - (*ap)[tokName] = &s + (*ap)[tokName] = to.Ptr(tokValue) tokName = "" - break } } return nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/batch_transfer.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/batch_transfer.go index a86fc582c52f5..c1b3a3d272962 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/batch_transfer.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/batch_transfer.go @@ -11,12 +11,17 @@ import ( "errors" ) +const ( + DefaultConcurrency = 5 +) + // BatchTransferOptions identifies options used by doBatchTransfer. type BatchTransferOptions struct { TransferSize int64 ChunkSize int64 + NumChunks uint16 Concurrency uint16 - Operation func(offset int64, chunkSize int64, ctx context.Context) error + Operation func(ctx context.Context, offset int64, chunkSize int64) error OperationName string } @@ -28,13 +33,12 @@ func DoBatchTransfer(ctx context.Context, o *BatchTransferOptions) error { } if o.Concurrency == 0 { - o.Concurrency = 5 // default concurrency + o.Concurrency = DefaultConcurrency // default concurrency } // Prepare and do parallel operations. - numChunks := uint16(((o.TransferSize - 1) / o.ChunkSize) + 1) operationChannel := make(chan func() error, o.Concurrency) // Create the channel that release 'concurrency' goroutines concurrently - operationResponseChannel := make(chan error, numChunks) // Holds each response + operationResponseChannel := make(chan error, o.NumChunks) // Holds each response ctx, cancel := context.WithCancel(ctx) defer cancel() @@ -50,23 +54,22 @@ func DoBatchTransfer(ctx context.Context, o *BatchTransferOptions) error { } // Add each chunk's operation to the channel. - for chunkNum := uint16(0); chunkNum < numChunks; chunkNum++ { + for chunkNum := uint16(0); chunkNum < o.NumChunks; chunkNum++ { curChunkSize := o.ChunkSize - if chunkNum == numChunks-1 { // Last chunk + if chunkNum == o.NumChunks-1 { // Last chunk curChunkSize = o.TransferSize - (int64(chunkNum) * o.ChunkSize) // Remove size of all transferred chunks from total } offset := int64(chunkNum) * o.ChunkSize - operationChannel <- func() error { - return o.Operation(offset, curChunkSize, ctx) + return o.Operation(ctx, offset, curChunkSize) } } close(operationChannel) // Wait for the operations to complete. 
var firstErr error = nil - for chunkNum := uint16(0); chunkNum < numChunks; chunkNum++ { + for chunkNum := uint16(0); chunkNum < o.NumChunks; chunkNum++ { responseError := <-operationResponseChannel // record the first error (the original error which should cause the other chunks to fail with canceled context) if responseError != nil && firstErr == nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/buffer_manager.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/buffer_manager.go new file mode 100644 index 0000000000000..e3aa4a4886dec --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/buffer_manager.go @@ -0,0 +1,70 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package shared + +type BufferManager[T ~[]byte] interface { + // Acquire returns the channel that contains the pool of buffers. + Acquire() <-chan T + + // Release releases the buffer back to the pool for reuse/cleanup. + Release(T) + + // Grow grows the number of buffers, up to the predefined max. + // It returns the total number of buffers or an error. + // No error is returned if the number of buffers has reached max. + // This is called only from the reading goroutine. + Grow() (int, error) + + // Free cleans up all buffers. + Free() +} + +// mmbPool implements the bufferManager interface. +// it uses anonymous memory mapped files for buffers. +// don't use this type directly, use newMMBPool() instead. +type mmbPool struct { + buffers chan Mmb + count int + max int + size int64 +} + +func NewMMBPool(maxBuffers int, bufferSize int64) BufferManager[Mmb] { + return &mmbPool{ + buffers: make(chan Mmb, maxBuffers), + max: maxBuffers, + size: bufferSize, + } +} + +func (pool *mmbPool) Acquire() <-chan Mmb { + return pool.buffers +} + +func (pool *mmbPool) Grow() (int, error) { + if pool.count < pool.max { + buffer, err := NewMMB(pool.size) + if err != nil { + return 0, err + } + pool.buffers <- buffer + pool.count++ + } + return pool.count, nil +} + +func (pool *mmbPool) Release(buffer Mmb) { + pool.buffers <- buffer +} + +func (pool *mmbPool) Free() { + for i := 0; i < pool.count; i++ { + buffer := <-pool.buffers + buffer.Delete() + } + pool.count = 0 +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/challenge_policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/challenge_policy.go new file mode 100644 index 0000000000000..1c81b9db9b862 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/challenge_policy.go @@ -0,0 +1,113 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package shared + +import ( + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "strings" +) + +type storageAuthorizer struct { + scopes []string + tenantID string +} + +func NewStorageChallengePolicy(cred azcore.TokenCredential, audience string) policy.Policy { + s := storageAuthorizer{scopes: []string{audience}} + return runtime.NewBearerTokenPolicy(cred, []string{audience}, &policy.BearerTokenOptions{ + AuthorizationHandler: policy.AuthorizationHandler{ + OnRequest: s.onRequest, + OnChallenge: s.onChallenge, + }, + }) +} + +func (s *storageAuthorizer) onRequest(req *policy.Request, authNZ func(policy.TokenRequestOptions) error) error { + return authNZ(policy.TokenRequestOptions{Scopes: s.scopes}) +} + +func (s *storageAuthorizer) onChallenge(req *policy.Request, resp *http.Response, authNZ func(policy.TokenRequestOptions) error) error { + // parse the challenge + err := s.parseChallenge(resp) + if err != nil { + return err + } + // TODO: Set tenantID when policy.TokenRequestOptions supports it. https://github.com/Azure/azure-sdk-for-go/issues/19841 + return authNZ(policy.TokenRequestOptions{Scopes: s.scopes}) +} + +type challengePolicyError struct { + err error +} + +func (c *challengePolicyError) Error() string { + return c.err.Error() +} + +func (*challengePolicyError) NonRetriable() { + // marker method +} + +func (c *challengePolicyError) Unwrap() error { + return c.err +} + +// parses Tenant ID from auth challenge +// https://login.microsoftonline.com/00000000-0000-0000-0000-000000000000/oauth2/authorize +func parseTenant(url string) string { + if url == "" { + return "" + } + parts := strings.Split(url, "/") + if len(parts) >= 3 { + tenant := parts[3] + tenant = strings.ReplaceAll(tenant, ",", "") + return tenant + } else { + return "" + } +} + +func (s *storageAuthorizer) parseChallenge(resp *http.Response) error { + authHeader := resp.Header.Get("WWW-Authenticate") + if authHeader == "" { + return &challengePolicyError{err: errors.New("response has no WWW-Authenticate header for challenge authentication")} + } + + // Strip down to auth and resource + // Format is "Bearer authorization_uri=\"\" resource_id=\"\"" + authHeader = strings.ReplaceAll(authHeader, "Bearer ", "") + + parts := strings.Split(authHeader, " ") + + vals := map[string]string{} + for _, part := range parts { + subParts := strings.Split(part, "=") + if len(subParts) == 2 { + stripped := strings.ReplaceAll(subParts[1], "\"", "") + stripped = strings.TrimSuffix(stripped, ",") + vals[subParts[0]] = stripped + } + } + + s.tenantID = parseTenant(vals["authorization_uri"]) + + scope := vals["resource_id"] + if scope == "" { + return &challengePolicyError{err: errors.New("could not find a valid resource in the WWW-Authenticate header")} + } + + if !strings.HasSuffix(scope, "/.default") { + scope += "/.default" + } + s.scopes = []string{scope} + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/mmf_unix.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/mmf_unix.go new file mode 100644 index 0000000000000..cdcadf3116078 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/mmf_unix.go @@ -0,0 +1,38 @@ +//go:build go1.18 && (linux || darwin || dragonfly || freebsd || openbsd || netbsd || solaris || aix) +// +build go1.18 +// +build linux darwin dragonfly freebsd 
openbsd netbsd solaris aix + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package shared + +import ( + "fmt" + "os" + "syscall" +) + +// mmb is a memory mapped buffer +type Mmb []byte + +// newMMB creates a new memory mapped buffer with the specified size +func NewMMB(size int64) (Mmb, error) { + prot, flags := syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_ANON|syscall.MAP_PRIVATE + addr, err := syscall.Mmap(-1, 0, int(size), prot, flags) + if err != nil { + return nil, os.NewSyscallError("Mmap", err) + } + return Mmb(addr), nil +} + +// delete cleans up the memory mapped buffer +func (m *Mmb) Delete() { + err := syscall.Munmap(*m) + *m = nil + if err != nil { + // if we get here, there is likely memory corruption. + // please open an issue https://github.com/Azure/azure-sdk-for-go/issues + panic(fmt.Sprintf("Munmap error: %v", err)) + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/mmf_windows.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/mmf_windows.go new file mode 100644 index 0000000000000..ef9fdc2a1f075 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/mmf_windows.go @@ -0,0 +1,56 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package shared + +import ( + "fmt" + "os" + "reflect" + "syscall" + "unsafe" +) + +// Mmb is a memory mapped buffer +type Mmb []byte + +// NewMMB creates a new memory mapped buffer with the specified size +func NewMMB(size int64) (Mmb, error) { + const InvalidHandleValue = ^uintptr(0) // -1 + + prot, access := uint32(syscall.PAGE_READWRITE), uint32(syscall.FILE_MAP_WRITE) + hMMF, err := syscall.CreateFileMapping(syscall.Handle(InvalidHandleValue), nil, prot, uint32(size>>32), uint32(size&0xffffffff), nil) + if err != nil { + return nil, os.NewSyscallError("CreateFileMapping", err) + } + defer func() { + _ = syscall.CloseHandle(hMMF) + }() + + addr, err := syscall.MapViewOfFile(hMMF, access, 0, 0, uintptr(size)) + if err != nil { + return nil, os.NewSyscallError("MapViewOfFile", err) + } + + m := Mmb{} + h := (*reflect.SliceHeader)(unsafe.Pointer(&m)) + h.Data = addr + h.Len = int(size) + h.Cap = h.Len + return m, nil +} + +// Delete cleans up the memory mapped buffer +func (m *Mmb) Delete() { + addr := uintptr(unsafe.Pointer(&(([]byte)(*m)[0]))) + *m = Mmb{} + err := syscall.UnmapViewOfFile(addr) + if err != nil { + // if we get here, there is likely memory corruption. 
+ // please open an issue https://github.com/Azure/azure-sdk-for-go/issues + panic(fmt.Sprintf("UnmapViewOfFile error: %v", err)) + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/shared.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/shared.go index 48a5ad842aec2..c7922076f3cfa 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/shared.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/shared.go @@ -9,6 +9,7 @@ package shared import ( "errors" "fmt" + "hash/crc64" "io" "net" "net/url" @@ -37,8 +38,14 @@ const ( HeaderIfNoneMatch = "If-None-Match" HeaderIfUnmodifiedSince = "If-Unmodified-Since" HeaderRange = "Range" + HeaderXmsVersion = "x-ms-version" + HeaderXmsRequestID = "x-ms-request-id" ) +const crc64Polynomial uint64 = 0x9A6C9329AC4BC9B5 + +var CRC64Table = crc64.MakeTable(crc64Polynomial) + // CopyOptions returns a zero-value T if opts is nil. // If opts is not nil, a copy is made and its address returned. func CopyOptions[T any](opts *T) *T { @@ -80,22 +87,6 @@ func ParseConnectionString(connectionString string) (ParsedConnectionString, err connStrMap[parts[0]] = parts[1] } - accountName, ok := connStrMap["AccountName"] - if !ok { - return ParsedConnectionString{}, errors.New("connection string missing AccountName") - } - - accountKey, ok := connStrMap["AccountKey"] - if !ok { - sharedAccessSignature, ok := connStrMap["SharedAccessSignature"] - if !ok { - return ParsedConnectionString{}, errors.New("connection string missing AccountKey and SharedAccessSignature") - } - return ParsedConnectionString{ - ServiceURL: fmt.Sprintf("%v://%v.blob.%v/?%v", defaultScheme, accountName, defaultSuffix, sharedAccessSignature), - }, nil - } - protocol, ok := connStrMap["DefaultEndpointsProtocol"] if !ok { protocol = defaultScheme @@ -106,26 +97,44 @@ func ParseConnectionString(connectionString string) (ParsedConnectionString, err suffix = defaultSuffix } - if blobEndpoint, ok := connStrMap["BlobEndpoint"]; ok { + blobEndpoint, has_blobEndpoint := connStrMap["BlobEndpoint"] + accountName, has_accountName := connStrMap["AccountName"] + + var serviceURL string + if has_blobEndpoint { + serviceURL = blobEndpoint + } else if has_accountName { + serviceURL = fmt.Sprintf("%v://%v.blob.%v", protocol, accountName, suffix) + } else { + return ParsedConnectionString{}, errors.New("connection string needs either AccountName or BlobEndpoint") + } + + if !strings.HasSuffix(serviceURL, "/") { + // add a trailing slash to be consistent with the portal + serviceURL += "/" + } + + accountKey, has_accountKey := connStrMap["AccountKey"] + sharedAccessSignature, has_sharedAccessSignature := connStrMap["SharedAccessSignature"] + + if has_accountName && has_accountKey { return ParsedConnectionString{ - ServiceURL: blobEndpoint, + ServiceURL: serviceURL, AccountName: accountName, AccountKey: accountKey, }, nil + } else if has_sharedAccessSignature { + return ParsedConnectionString{ + ServiceURL: fmt.Sprintf("%v?%v", serviceURL, sharedAccessSignature), + }, nil + } else { + return ParsedConnectionString{}, errors.New("connection string needs either AccountKey or SharedAccessSignature") } - return ParsedConnectionString{ - ServiceURL: fmt.Sprintf("%v://%v.blob.%v", protocol, accountName, suffix), - AccountName: accountName, - AccountKey: accountKey, - }, nil } // SerializeBlobTags converts tags to generated.BlobTags func SerializeBlobTags(tagsMap map[string]string) 
*generated.BlobTags { - if tagsMap == nil { - return nil - } blobTagSet := make([]*generated.BlobTag, 0) for key, val := range tagsMap { newKey, newVal := key, val @@ -135,7 +144,7 @@ func SerializeBlobTags(tagsMap map[string]string) *generated.BlobTags { } func SerializeBlobTagsToStrPtr(tagsMap map[string]string) *string { - if tagsMap == nil { + if len(tagsMap) == 0 { return nil } tags := make([]string, 0) @@ -236,3 +245,27 @@ func IsIPEndpointStyle(host string) bool { } return net.ParseIP(host) != nil } + +// ReadAtLeast reads from r into buf until it has read at least min bytes. +// It returns the number of bytes copied and an error. +// The EOF error is returned if no bytes were read or +// EOF happened after reading fewer than min bytes. +// If min is greater than the length of buf, ReadAtLeast returns ErrShortBuffer. +// On return, n >= min if and only if err == nil. +// If r returns an error having read at least min bytes, the error is dropped. +// This method is same as io.ReadAtLeast except that it does not +// return io.ErrUnexpectedEOF when fewer than min bytes are read. +func ReadAtLeast(r io.Reader, buf []byte, min int) (n int, err error) { + if len(buf) < min { + return 0, io.ErrShortBuffer + } + for n < min && err == nil { + var nn int + nn, err = r.Read(buf[n:]) + n += nn + } + if n >= min { + err = nil + } + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/transfer_manager.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/transfer_manager.go deleted file mode 100644 index f3c9d4be7e010..0000000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/transfer_manager.go +++ /dev/null @@ -1,156 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. - -package shared - -import ( - "fmt" - "sync" -) - -const _1MiB = 1024 * 1024 - -// TransferManager provides a buffer and thread pool manager for certain transfer options. -// It is undefined behavior if code outside this package call any of these methods. -type TransferManager interface { - // Get provides a buffer that will be used to read data into and write out to the stream. - // It is guaranteed by this package to not read or write beyond the size of the slice. - Get() []byte - - // Put may or may not put the buffer into underlying storage, depending on settings. - // The buffer must not be touched after this has been called. - Put(b []byte) // nolint - - // Run will use a goroutine pool entry to run a function. This blocks until a pool - // goroutine becomes available. - Run(func()) - - // Close shuts down all internal goroutines. This must be called when the TransferManager - // will no longer be used. Not closing it will cause a goroutine leak. - Close() -} - -// --------------------------------------------------------------------------------------------------------------------- - -type staticBuffer struct { - buffers chan []byte - size int - threadpool chan func() -} - -// NewStaticBuffer creates a TransferManager that will use a channel as a circular buffer -// that can hold "max" buffers of "size". The goroutine pool is also sized at max. This -// can be shared between calls if you wish to control maximum memory and concurrency with -// multiple concurrent calls. 
-func NewStaticBuffer(size, max int) (TransferManager, error) { - if size < 1 || max < 1 { - return nil, fmt.Errorf("cannot be called with size or max set to < 1") - } - - if size < _1MiB { - return nil, fmt.Errorf("cannot have size < 1MiB") - } - - threadpool := make(chan func(), max) - buffers := make(chan []byte, max) - for i := 0; i < max; i++ { - go func() { - for f := range threadpool { - f() - } - }() - - buffers <- make([]byte, size) - } - return staticBuffer{ - buffers: buffers, - size: size, - threadpool: threadpool, - }, nil -} - -// Get implements TransferManager.Get(). -func (s staticBuffer) Get() []byte { - return <-s.buffers -} - -// Put implements TransferManager.Put(). -func (s staticBuffer) Put(b []byte) { // nolint - select { - case s.buffers <- b: - default: // This shouldn't happen, but just in case they call Put() with there own buffer. - } -} - -// Run implements TransferManager.Run(). -func (s staticBuffer) Run(f func()) { - s.threadpool <- f -} - -// Close implements TransferManager.Close(). -func (s staticBuffer) Close() { - close(s.threadpool) - close(s.buffers) -} - -// --------------------------------------------------------------------------------------------------------------------- - -type syncPool struct { - threadpool chan func() - pool sync.Pool -} - -// NewSyncPool creates a TransferManager that will use a sync.Pool -// that can hold a non-capped number of buffers constrained by concurrency. This -// can be shared between calls if you wish to share memory and concurrency. -func NewSyncPool(size, concurrency int) (TransferManager, error) { - if size < 1 || concurrency < 1 { - return nil, fmt.Errorf("cannot be called with size or max set to < 1") - } - - if size < _1MiB { - return nil, fmt.Errorf("cannot have size < 1MiB") - } - - threadpool := make(chan func(), concurrency) - for i := 0; i < concurrency; i++ { - go func() { - for f := range threadpool { - f() - } - }() - } - - return &syncPool{ - threadpool: threadpool, - pool: sync.Pool{ - New: func() interface{} { - return make([]byte, size) - }, - }, - }, nil -} - -// Get implements TransferManager.Get(). -func (s *syncPool) Get() []byte { - return s.pool.Get().([]byte) -} - -// Put implements TransferManager.Put(). -// nolint -func (s *syncPool) Put(b []byte) { - s.pool.Put(b) -} - -// Run implements TransferManager.Run(). -func (s *syncPool) Run(f func()) { - s.threadpool <- f -} - -// Close implements TransferManager.Close(). -func (s *syncPool) Close() { - close(s.threadpool) -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/log.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/log.go new file mode 100644 index 0000000000000..4d26992d3ebd6 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/log.go @@ -0,0 +1,16 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azblob + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" +) + +const ( + // EventUpload is used for logging events related to upload operation. + EventUpload = exported.EventUpload + + // EventSubmitBatch is used for logging events related to submit blob batch operation. 
+ EventSubmitBatch = exported.EventSubmitBatch +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/models.go index 099f48d172179..2896788e1f16a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/models.go @@ -11,7 +11,6 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service" ) @@ -51,11 +50,11 @@ type DownloadBufferOptions = blob.DownloadBufferOptions // DownloadFileOptions identifies options used by the DownloadBuffer and DownloadFile functions. type DownloadFileOptions = blob.DownloadFileOptions -// CpkInfo contains a group of parameters for client provided encryption key. -type CpkInfo = generated.CpkInfo +// CPKInfo contains a group of parameters for client provided encryption key. +type CPKInfo = blob.CPKInfo -// CpkScopeInfo contains a group of parameters for the ContainerClient.Create method. -type CpkScopeInfo = generated.ContainerCpkScopeInfo +// CPKScopeInfo contains a group of parameters for the ContainerClient.Create method. +type CPKScopeInfo = container.CPKScopeInfo // AccessConditions identifies blob-specific access conditions which you optionally set. type AccessConditions = exported.BlobAccessConditions diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go index e210a76e6e68d..ca196f2c8b4c6 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go @@ -8,10 +8,12 @@ package pageblob import ( "context" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" "io" "net/http" "net/url" "os" + "time" "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" @@ -24,9 +26,7 @@ import ( ) // ClientOptions contains the optional parameters when creating a Client. 
-type ClientOptions struct { - azcore.ClientOptions -} +type ClientOptions base.ClientOptions // Client represents a client to an Azure Storage page blob; type Client base.CompositeClient[generated.BlobClient, generated.PageBlobClient] @@ -36,12 +36,16 @@ type Client base.CompositeClient[generated.BlobClient, generated.PageBlobClient] // - cred - an Azure AD credential, typically obtained via the azidentity module // - options - client options; pass nil to accept the default values func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { - authPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil) + audience := base.GetAudience((*base.ClientOptions)(options)) + authPolicy := shared.NewStorageChallengePolicy(cred, audience) conOptions := shared.GetClientOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - return (*Client)(base.NewPageBlobClient(blobURL, pl, nil)), nil + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + return (*Client)(base.NewPageBlobClient(blobURL, azClient, nil)), nil } // NewClientWithNoCredential creates an instance of Client with the specified values. @@ -50,9 +54,12 @@ func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptio // - options - client options; pass nil to accept the default values func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) { conOptions := shared.GetClientOptions(options) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) - return (*Client)(base.NewPageBlobClient(blobURL, pl, nil)), nil + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + return (*Client)(base.NewPageBlobClient(blobURL, azClient, nil)), nil } // NewClientWithSharedKeyCredential creates an instance of Client with the specified values. @@ -62,10 +69,13 @@ func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, func NewClientWithSharedKeyCredential(blobURL string, cred *blob.SharedKeyCredential, options *ClientOptions) (*Client, error) { authPolicy := exported.NewSharedKeyCredPolicy(cred) conOptions := shared.GetClientOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - return (*Client)(base.NewPageBlobClient(blobURL, pl, cred)), nil + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + return (*Client)(base.NewPageBlobClient(blobURL, azClient, cred)), nil } // NewClientFromConnectionString creates an instance of Client with the specified values. 
@@ -120,7 +130,7 @@ func (pb *Client) WithSnapshot(snapshot string) (*Client, error) { } p.Snapshot = snapshot - return (*Client)(base.NewPageBlobClient(p.String(), pb.generated().Pipeline(), pb.sharedKey())), nil + return (*Client)(base.NewPageBlobClient(p.String(), pb.generated().InternalClient(), pb.sharedKey())), nil } // WithVersionID creates a new PageBlobURL object identical to the source but with the specified snapshot timestamp. @@ -132,7 +142,7 @@ func (pb *Client) WithVersionID(versionID string) (*Client, error) { } p.VersionID = versionID - return (*Client)(base.NewPageBlobClient(p.String(), pb.generated().Pipeline(), pb.sharedKey())), nil + return (*Client)(base.NewPageBlobClient(p.String(), pb.generated().InternalClient(), pb.sharedKey())), nil } // Create creates a page blob of the specified length. Call PutPage to upload data to a page blob. @@ -149,14 +159,25 @@ func (pb *Client) Create(ctx context.Context, size int64, o *CreateOptions) (Cre // This method panics if the stream is not at position 0. // Note that the http client closes the body stream after the request is sent to the service. // For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page. -func (pb *Client) UploadPages(ctx context.Context, body io.ReadSeekCloser, options *UploadPagesOptions) (UploadPagesResponse, error) { +func (pb *Client) UploadPages(ctx context.Context, body io.ReadSeekCloser, contentRange blob.HTTPRange, options *UploadPagesOptions) (UploadPagesResponse, error) { count, err := shared.ValidateSeekableStreamAt0AndGetCount(body) if err != nil { return UploadPagesResponse{}, err } - uploadPagesOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions := options.format() + uploadPagesOptions := &generated.PageBlobClientUploadPagesOptions{ + Range: exported.FormatHTTPRange(contentRange), + } + + leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions := options.format() + + if options != nil && options.TransactionalValidation != nil { + body, err = options.TransactionalValidation.Apply(body, uploadPagesOptions) + if err != nil { + return UploadPagesResponse{}, nil + } + } resp, err := pb.generated().UploadPages(ctx, count, body, uploadPagesOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions) @@ -218,7 +239,7 @@ func (pb *Client) NewGetPageRangesPager(o *GetPageRangesOptions) *runtime.Pager[ if err != nil { return GetPageRangesResponse{}, err } - resp, err := pb.generated().Pipeline().Do(req) + resp, err := pb.generated().InternalClient().Pipeline().Do(req) if err != nil { return GetPageRangesResponse{}, err } @@ -251,7 +272,7 @@ func (pb *Client) NewGetPageRangesDiffPager(o *GetPageRangesDiffOptions) *runtim if err != nil { return GetPageRangesDiffResponse{}, err } - resp, err := pb.generated().Pipeline().Do(req) + resp, err := pb.generated().InternalClient().Pipeline().Do(req) if err != nil { return GetPageRangesDiffResponse{}, err } @@ -317,9 +338,27 @@ func (pb *Client) Undelete(ctx context.Context, o *blob.UndeleteOptions) (blob.U return pb.BlobClient().Undelete(ctx, o) } +// SetImmutabilityPolicy operation enables users to set the immutability policy on a blob. 
+// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview +func (pb *Client) SetImmutabilityPolicy(ctx context.Context, expiryTime time.Time, options *blob.SetImmutabilityPolicyOptions) (blob.SetImmutabilityPolicyResponse, error) { + return pb.BlobClient().SetImmutabilityPolicy(ctx, expiryTime, options) +} + +// DeleteImmutabilityPolicy operation enables users to delete the immutability policy on a blob. +// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview +func (pb *Client) DeleteImmutabilityPolicy(ctx context.Context, options *blob.DeleteImmutabilityPolicyOptions) (blob.DeleteImmutabilityPolicyResponse, error) { + return pb.BlobClient().DeleteImmutabilityPolicy(ctx, options) +} + +// SetLegalHold operation enables users to set legal hold on a blob. +// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview +func (pb *Client) SetLegalHold(ctx context.Context, legalHold bool, options *blob.SetLegalHoldOptions) (blob.SetLegalHoldResponse, error) { + return pb.BlobClient().SetLegalHold(ctx, legalHold, options) +} + // SetTier operation sets the tier on a blob. The operation is allowed on a page // blob in a premium storage account and on a block blob in a blob storage account (locally -// redundant storage only). A premium page blob's tier determines the allowed size, IOPS, and +// redundant storage only). A premium page blob's tier determines the allowed size, IOPs, and // bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation // does not update the blob's ETag. // For detailed information about block blob level tier-ing see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers. @@ -333,6 +372,12 @@ func (pb *Client) GetProperties(ctx context.Context, o *blob.GetPropertiesOption return pb.BlobClient().GetProperties(ctx, o) } +// GetAccountInfo provides account level information +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/get-account-information?tabs=shared-access-signatures. +func (pb *Client) GetAccountInfo(ctx context.Context, o *blob.GetAccountInfoOptions) (blob.GetAccountInfoResponse, error) { + return pb.BlobClient().GetAccountInfo(ctx, o) +} + // SetHTTPHeaders changes a blob's HTTP headers. // For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. func (pb *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders blob.HTTPHeaders, o *blob.SetHTTPHeadersOptions) (blob.SetHTTPHeadersResponse, error) { @@ -341,7 +386,7 @@ func (pb *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders blob.HTTPHeade // SetMetadata changes a blob's metadata. // https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata. -func (pb *Client) SetMetadata(ctx context.Context, metadata map[string]string, o *blob.SetMetadataOptions) (blob.SetMetadataResponse, error) { +func (pb *Client) SetMetadata(ctx context.Context, metadata map[string]*string, o *blob.SetMetadataOptions) (blob.SetMetadataResponse, error) { return pb.BlobClient().SetMetadata(ctx, metadata, o) } @@ -383,6 +428,12 @@ func (pb *Client) CopyFromURL(ctx context.Context, copySource string, o *blob.Co return pb.BlobClient().CopyFromURL(ctx, copySource, o) } +// GetSASURL is a convenience method for generating a SAS token for the currently pointed at Page blob. +// It can only be used if the credential supplied during creation was a SharedKeyCredential. 
+func (pb *Client) GetSASURL(permissions sas.BlobPermissions, expiry time.Time, o *blob.GetSASURLOptions) (string, error) { + return pb.BlobClient().GetSASURL(permissions, expiry, o) +} + // Concurrent Download Functions ----------------------------------------------------------------------------------------- // DownloadStream reads a range of bytes from a blob. The response also includes the blob's properties and metadata. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/constants.go index 646587cec8d06..096a7910aa9ca 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/constants.go @@ -28,7 +28,7 @@ func PossibleCopyStatusTypeValues() []CopyStatusType { return generated.PossibleCopyStatusTypeValues() } -// PremiumPageBlobAccessTier defines values for Premium PageBlob's AccessTier +// PremiumPageBlobAccessTier defines values for Premium PageBlob's AccessTier. type PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTier const ( @@ -50,7 +50,7 @@ func PossiblePremiumPageBlobAccessTierValues() []PremiumPageBlobAccessTier { return generated.PossiblePremiumPageBlobAccessTierValues() } -// SequenceNumberActionType defines values for SequenceNumberActionType +// SequenceNumberActionType defines values for SequenceNumberActionType. type SequenceNumberActionType = generated.SequenceNumberActionType const ( diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/models.go index c1b1194ff83b9..39aef20ff5d3b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/models.go @@ -18,7 +18,7 @@ import ( // Type Declarations --------------------------------------------------------------------- -// PageList - the list of pages +// PageList - the list of pages. type PageList = generated.PageList // ClearRange defines a range of pages. @@ -46,16 +46,16 @@ type CreateOptions struct { // are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source // blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. // See Naming and Referencing Containers, Blobs, and Metadata for more information. - Metadata map[string]string + Metadata map[string]*string // Optional. Indicates the tier to be set on the page blob. Tier *PremiumPageBlobAccessTier HTTPHeaders *blob.HTTPHeaders - CpkInfo *blob.CpkInfo + CPKInfo *blob.CPKInfo - CpkScopeInfo *blob.CpkScopeInfo + CPKScopeInfo *blob.CPKScopeInfo AccessConditions *blob.AccessConditions // Specifies the date time when the blobs immutability policy is set to expire. 
@@ -67,7 +67,7 @@ type CreateOptions struct { } func (o *CreateOptions) format() (*generated.PageBlobClientCreateOptions, *generated.BlobHTTPHeaders, - *generated.LeaseAccessConditions, *generated.CpkInfo, *generated.CpkScopeInfo, *generated.ModifiedAccessConditions) { + *generated.LeaseAccessConditions, *generated.CPKInfo, *generated.CPKScopeInfo, *generated.ModifiedAccessConditions) { if o == nil { return nil, nil, nil, nil, nil, nil } @@ -79,40 +79,31 @@ func (o *CreateOptions) format() (*generated.PageBlobClientCreateOptions, *gener Tier: o.Tier, } leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) - return options, o.HTTPHeaders, leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions + return options, o.HTTPHeaders, leaseAccessConditions, o.CPKInfo, o.CPKScopeInfo, modifiedAccessConditions } // --------------------------------------------------------------------------------------------------------------------- // UploadPagesOptions contains the optional parameters for the Client.UploadPages method. type UploadPagesOptions struct { - // Range specifies a range of bytes. The default value is all bytes. - Range blob.HTTPRange - - TransactionalContentCRC64 []byte - // Specify the transactional md5 for the body, to be validated by the service. - TransactionalContentMD5 []byte + // TransactionalValidation specifies the transfer validation type to use. + // The default is nil (no transfer validation). + TransactionalValidation blob.TransferValidationType - CpkInfo *blob.CpkInfo - CpkScopeInfo *blob.CpkScopeInfo + CPKInfo *blob.CPKInfo + CPKScopeInfo *blob.CPKScopeInfo SequenceNumberAccessConditions *SequenceNumberAccessConditions AccessConditions *blob.AccessConditions } -func (o *UploadPagesOptions) format() (*generated.PageBlobClientUploadPagesOptions, *generated.LeaseAccessConditions, - *generated.CpkInfo, *generated.CpkScopeInfo, *generated.SequenceNumberAccessConditions, *generated.ModifiedAccessConditions) { +func (o *UploadPagesOptions) format() (*generated.LeaseAccessConditions, + *generated.CPKInfo, *generated.CPKScopeInfo, *generated.SequenceNumberAccessConditions, *generated.ModifiedAccessConditions) { if o == nil { - return nil, nil, nil, nil, nil, nil - } - - options := &generated.PageBlobClientUploadPagesOptions{ - TransactionalContentCRC64: o.TransactionalContentCRC64, - TransactionalContentMD5: o.TransactionalContentMD5, - Range: exported.FormatHTTPRange(o.Range), + return nil, nil, nil, nil, nil } leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) - return options, leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, o.SequenceNumberAccessConditions, modifiedAccessConditions + return leaseAccessConditions, o.CPKInfo, o.CPKScopeInfo, o.SequenceNumberAccessConditions, modifiedAccessConditions } // --------------------------------------------------------------------------------------------------------------------- @@ -121,14 +112,13 @@ func (o *UploadPagesOptions) format() (*generated.PageBlobClientUploadPagesOptio type UploadPagesFromURLOptions struct { // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. CopySourceAuthorization *string - // Specify the md5 calculated for the range of bytes that must be read from the copy source. - SourceContentMD5 []byte - // Specify the crc64 calculated for the range of bytes that must be read from the copy source. 
- SourceContentCRC64 []byte - CpkInfo *blob.CpkInfo + // SourceContentValidation contains the validation mechanism used on the range of bytes read from the source. + SourceContentValidation blob.SourceContentValidationType + + CPKInfo *blob.CPKInfo - CpkScopeInfo *blob.CpkScopeInfo + CPKScopeInfo *blob.CPKScopeInfo SequenceNumberAccessConditions *SequenceNumberAccessConditions @@ -137,40 +127,42 @@ type UploadPagesFromURLOptions struct { AccessConditions *blob.AccessConditions } -func (o *UploadPagesFromURLOptions) format() (*generated.PageBlobClientUploadPagesFromURLOptions, *generated.CpkInfo, *generated.CpkScopeInfo, +func (o *UploadPagesFromURLOptions) format() (*generated.PageBlobClientUploadPagesFromURLOptions, *generated.CPKInfo, *generated.CPKScopeInfo, *generated.LeaseAccessConditions, *generated.SequenceNumberAccessConditions, *generated.ModifiedAccessConditions, *generated.SourceModifiedAccessConditions) { if o == nil { return nil, nil, nil, nil, nil, nil, nil } options := &generated.PageBlobClientUploadPagesFromURLOptions{ - SourceContentMD5: o.SourceContentMD5, - SourceContentcrc64: o.SourceContentCRC64, CopySourceAuthorization: o.CopySourceAuthorization, } + if o.SourceContentValidation != nil { + o.SourceContentValidation.Apply(options) + } + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) - return options, o.CpkInfo, o.CpkScopeInfo, leaseAccessConditions, o.SequenceNumberAccessConditions, modifiedAccessConditions, o.SourceModifiedAccessConditions + return options, o.CPKInfo, o.CPKScopeInfo, leaseAccessConditions, o.SequenceNumberAccessConditions, modifiedAccessConditions, o.SourceModifiedAccessConditions } // --------------------------------------------------------------------------------------------------------------------- // ClearPagesOptions contains the optional parameters for the Client.ClearPages operation type ClearPagesOptions struct { - CpkInfo *blob.CpkInfo - CpkScopeInfo *blob.CpkScopeInfo + CPKInfo *blob.CPKInfo + CPKScopeInfo *blob.CPKScopeInfo SequenceNumberAccessConditions *SequenceNumberAccessConditions AccessConditions *blob.AccessConditions } -func (o *ClearPagesOptions) format() (*generated.LeaseAccessConditions, *generated.CpkInfo, - *generated.CpkScopeInfo, *generated.SequenceNumberAccessConditions, *generated.ModifiedAccessConditions) { +func (o *ClearPagesOptions) format() (*generated.LeaseAccessConditions, *generated.CPKInfo, + *generated.CPKScopeInfo, *generated.SequenceNumberAccessConditions, *generated.ModifiedAccessConditions) { if o == nil { return nil, nil, nil, nil, nil } leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) - return leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, o.SequenceNumberAccessConditions, modifiedAccessConditions + return leaseAccessConditions, o.CPKInfo, o.CPKScopeInfo, o.SequenceNumberAccessConditions, modifiedAccessConditions } // --------------------------------------------------------------------------------------------------------------------- @@ -178,20 +170,20 @@ func (o *ClearPagesOptions) format() (*generated.LeaseAccessConditions, *generat // GetPageRangesOptions contains the optional parameters for the Client.NewGetPageRangesPager method. type GetPageRangesOptions struct { Marker *string - // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // Specifies the maximum number of containers to return. 
If the request does not specify MaxResults, or specifies a value // greater than 5000, the server will return up to 5000 items. Note that if the // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder // of the results. For this reason, it is possible that the service will - // return fewer results than specified by maxresults, or than the default of 5000. + // return fewer results than specified by MaxResults, or than the default of 5000. MaxResults *int32 // Optional. This header is only supported in service versions 2019-04-19 and after and specifies the URL of a previous snapshot // of the target blob. The response will only contain pages that were changed // between the target blob and its previous snapshot. PrevSnapshotURL *string - // Optional in version 2015-07-08 and newer. The prevsnapshot parameter is a DateTime value that specifies that the response + // Optional in version 2015-07-08 and newer. The PrevSnapshot parameter is a DateTime value that specifies that the response // will contain only pages that were changed between target blob and previous // snapshot. Changed pages include both updated and cleared pages. The target blob may be a snapshot, as long as the snapshot - // specified by prevsnapshot is the older of the two. Note that incremental + // specified by PrevSnapshot is the older of the two. Note that incremental // snapshots are currently supported only for blobs created on or after January 1, 2016. PrevSnapshot *string // Range specifies a range of bytes. The default value is all bytes. @@ -206,7 +198,7 @@ type GetPageRangesOptions struct { func (o *GetPageRangesOptions) format() (*generated.PageBlobClientGetPageRangesOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { if o == nil { - return nil, nil, nil + return &generated.PageBlobClientGetPageRangesOptions{}, nil, nil } leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) @@ -228,20 +220,20 @@ type GetPageRangesDiffOptions struct { // as the value for the marker parameter in a subsequent call to request the next // page of list items. The marker value is opaque to the client. Marker *string - // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // Specifies the maximum number of containers to return. If the request does not specify MaxResults, or specifies a value // greater than 5000, the server will return up to 5000 items. Note that if the // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder // of the results. For this reason, it is possible that the service will - // return fewer results than specified by maxresults, or than the default of 5000. + // return fewer results than specified by MaxResults, or than the default of 5000. MaxResults *int32 // Optional. This header is only supported in service versions 2019-04-19 and after and specifies the URL of a previous snapshot // of the target blob. The response will only contain pages that were changed // between the target blob and its previous snapshot. PrevSnapshotURL *string - // Optional in version 2015-07-08 and newer. The prevsnapshot parameter is a DateTime value that specifies that the response + // Optional in version 2015-07-08 and newer. 
The PrevSnapshot parameter is a DateTime value that specifies that the response // will contain only pages that were changed between target blob and previous // snapshot. Changed pages include both updated and cleared pages. The target blob may be a snapshot, as long as the snapshot - // specified by prevsnapshot is the older of the two. Note that incremental + // specified by PrevSnapshot is the older of the two. Note that incremental // snapshots are currently supported only for blobs created on or after January 1, 2016. PrevSnapshot *string // Range specifies a range of bytes. The default value is all bytes. @@ -276,19 +268,19 @@ func (o *GetPageRangesDiffOptions) format() (*generated.PageBlobClientGetPageRan // ResizeOptions contains the optional parameters for the Client.Resize method. type ResizeOptions struct { - CpkInfo *blob.CpkInfo - CpkScopeInfo *blob.CpkScopeInfo + CPKInfo *blob.CPKInfo + CPKScopeInfo *blob.CPKScopeInfo AccessConditions *blob.AccessConditions } func (o *ResizeOptions) format() (*generated.PageBlobClientResizeOptions, *generated.LeaseAccessConditions, - *generated.CpkInfo, *generated.CpkScopeInfo, *generated.ModifiedAccessConditions) { + *generated.CPKInfo, *generated.CPKScopeInfo, *generated.ModifiedAccessConditions) { if o == nil { return nil, nil, nil, nil, nil } leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) - return nil, leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions + return nil, leaseAccessConditions, o.CPKInfo, o.CPKScopeInfo, modifiedAccessConditions } // --------------------------------------------------------------------------------------------------------------------- diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/account.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/account.go index fa76ed95c416a..4069bb1320233 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/account.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/account.go @@ -25,21 +25,21 @@ type UserDelegationCredential = exported.UserDelegationCredential // AccountSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage account. 
// For more information, see https://docs.microsoft.com/rest/api/storageservices/constructing-an-account-sas type AccountSignatureValues struct { - Version string `param:"sv"` // If not specified, this format to SASVersion - Protocol Protocol `param:"spr"` // See the SASProtocol* constants - StartTime time.Time `param:"st"` // Not specified if IsZero - ExpiryTime time.Time `param:"se"` // Not specified if IsZero - Permissions string `param:"sp"` // Create by initializing a AccountSASPermissions and then call String() - IPRange IPRange `param:"sip"` - Services string `param:"ss"` // Create by initializing AccountSASServices and then call String() - ResourceTypes string `param:"srt"` // Create by initializing AccountSASResourceTypes and then call String() + Version string `param:"sv"` // If not specified, this format to SASVersion + Protocol Protocol `param:"spr"` // See the SASProtocol* constants + StartTime time.Time `param:"st"` // Not specified if IsZero + ExpiryTime time.Time `param:"se"` // Not specified if IsZero + Permissions string `param:"sp"` // Create by initializing AccountPermissions and then call String() + IPRange IPRange `param:"sip"` + ResourceTypes string `param:"srt"` // Create by initializing AccountResourceTypes and then call String() + EncryptionScope string `param:"ses"` } // SignWithSharedKey uses an account's shared key credential to sign this signature values to produce // the proper SAS query parameters. func (v AccountSignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKeyCredential) (QueryParameters, error) { // https://docs.microsoft.com/en-us/rest/api/storageservices/Constructing-an-Account-SAS - if v.ExpiryTime.IsZero() || v.Permissions == "" || v.ResourceTypes == "" || v.Services == "" { + if v.ExpiryTime.IsZero() || v.Permissions == "" || v.ResourceTypes == "" { return QueryParameters{}, errors.New("account SAS is missing at least one of these: ExpiryTime, Permissions, Service, or ResourceType") } if v.Version == "" { @@ -51,118 +51,61 @@ func (v AccountSignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKey } v.Permissions = perms.String() - startTime, expiryTime, _ := formatTimesForSigning(v.StartTime, v.ExpiryTime, time.Time{}) - - stringToSign := strings.Join([]string{ - sharedKeyCredential.AccountName(), - v.Permissions, - v.Services, - v.ResourceTypes, - startTime, - expiryTime, - v.IPRange.String(), - string(v.Protocol), - v.Version, - ""}, // That is right, the account SAS requires a terminating extra newline - "\n") - - signature, err := exported.ComputeHMACSHA256(sharedKeyCredential, stringToSign) - if err != nil { - return QueryParameters{}, err - } - p := QueryParameters{ - // Common SAS parameters - version: v.Version, - protocol: v.Protocol, - startTime: v.StartTime, - expiryTime: v.ExpiryTime, - permissions: v.Permissions, - ipRange: v.IPRange, - - // Account-specific SAS parameters - services: v.Services, - resourceTypes: v.ResourceTypes, - - // Calculated SAS signature - signature: signature, - } - - return p, nil -} - -// SignWithUserDelegation uses an account's UserDelegationKey to sign this signature values to produce the proper SAS query parameters. 
-func (v AccountSignatureValues) SignWithUserDelegation(userDelegationCredential *UserDelegationCredential) (QueryParameters, error) { - // https://docs.microsoft.com/en-us/rest/api/storageservices/Constructing-an-Account-SAS - if v.ExpiryTime.IsZero() || v.Permissions == "" || v.ResourceTypes == "" || v.Services == "" { - return QueryParameters{}, errors.New("account SAS is missing at least one of these: ExpiryTime, Permissions, Service, or ResourceType") - } - if v.Version == "" { - v.Version = Version - } - - perms, err := parseAccountPermissions(v.Permissions) + resources, err := parseAccountResourceTypes(v.ResourceTypes) if err != nil { return QueryParameters{}, err } - v.Permissions = perms.String() + v.ResourceTypes = resources.String() startTime, expiryTime, _ := formatTimesForSigning(v.StartTime, v.ExpiryTime, time.Time{}) stringToSign := strings.Join([]string{ - exported.GetAccountName(userDelegationCredential), + sharedKeyCredential.AccountName(), v.Permissions, - v.Services, + "b", // blob service v.ResourceTypes, startTime, expiryTime, v.IPRange.String(), string(v.Protocol), v.Version, + v.EncryptionScope, ""}, // That is right, the account SAS requires a terminating extra newline "\n") - signature, err := exported.ComputeUDCHMACSHA256(userDelegationCredential, stringToSign) + signature, err := exported.ComputeHMACSHA256(sharedKeyCredential, stringToSign) if err != nil { return QueryParameters{}, err } p := QueryParameters{ // Common SAS parameters - version: v.Version, - protocol: v.Protocol, - startTime: v.StartTime, - expiryTime: v.ExpiryTime, - permissions: v.Permissions, - ipRange: v.IPRange, + version: v.Version, + protocol: v.Protocol, + startTime: v.StartTime, + expiryTime: v.ExpiryTime, + permissions: v.Permissions, + ipRange: v.IPRange, + encryptionScope: v.EncryptionScope, // Account-specific SAS parameters - services: v.Services, + services: "b", // will always be "b" resourceTypes: v.ResourceTypes, // Calculated SAS signature signature: signature, } - udk := exported.GetUDKParams(userDelegationCredential) - - //User delegation SAS specific parameters - p.signedOID = *udk.SignedOID - p.signedTID = *udk.SignedTID - p.signedStart = *udk.SignedStart - p.signedExpiry = *udk.SignedExpiry - p.signedService = *udk.SignedService - p.signedVersion = *udk.SignedVersion - return p, nil } // AccountPermissions type simplifies creating the permissions string for an Azure Storage Account SAS. -// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's Permissions field. +// Initialize an instance of this type and then call its String method to set AccountSignatureValues' Permissions field. type AccountPermissions struct { - Read, Write, Delete, DeletePreviousVersion, List, Add, Create, Update, Process, Tag, FilterByTags bool + Read, Write, Delete, DeletePreviousVersion, PermanentDelete, List, Add, Create, Update, Process, FilterByTags, Tag, SetImmutabilityPolicy bool } // String produces the SAS permissions string for an Azure Storage account. -// Call this method to set AccountSASSignatureValues's Permissions field. +// Call this method to set AccountSignatureValues' Permissions field. 
func (p *AccountPermissions) String() string { var buffer bytes.Buffer if p.Read { @@ -177,6 +120,9 @@ func (p *AccountPermissions) String() string { if p.DeletePreviousVersion { buffer.WriteRune('x') } + if p.PermanentDelete { + buffer.WriteRune('y') + } if p.List { buffer.WriteRune('l') } @@ -192,16 +138,19 @@ func (p *AccountPermissions) String() string { if p.Process { buffer.WriteRune('p') } + if p.FilterByTags { + buffer.WriteRune('f') + } if p.Tag { buffer.WriteRune('t') } - if p.FilterByTags { - buffer.WriteRune('f') + if p.SetImmutabilityPolicy { + buffer.WriteRune('i') } return buffer.String() } -// Parse initializes the AccountSASPermissions' fields from a string. +// Parse initializes the AccountPermissions' fields from a string. func parseAccountPermissions(s string) (AccountPermissions, error) { p := AccountPermissions{} // Clear out the flags for _, r := range s { @@ -212,6 +161,10 @@ func parseAccountPermissions(s string) (AccountPermissions, error) { p.Write = true case 'd': p.Delete = true + case 'x': + p.DeletePreviousVersion = true + case 'y': + p.PermanentDelete = true case 'l': p.List = true case 'a': @@ -222,12 +175,12 @@ func parseAccountPermissions(s string) (AccountPermissions, error) { p.Update = true case 'p': p.Process = true - case 'x': - p.Process = true case 't': p.Tag = true case 'f': p.FilterByTags = true + case 'i': + p.SetImmutabilityPolicy = true default: return AccountPermissions{}, fmt.Errorf("invalid permission character: '%v'", r) } @@ -235,54 +188,14 @@ func parseAccountPermissions(s string) (AccountPermissions, error) { return p, nil } -// AccountServices type simplifies creating the services string for an Azure Storage Account SAS. -// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's Services field. -type AccountServices struct { - Blob, Queue, File bool -} - -// String produces the SAS services string for an Azure Storage account. -// Call this method to set AccountSASSignatureValues's Services field. -func (s *AccountServices) String() string { - var buffer bytes.Buffer - if s.Blob { - buffer.WriteRune('b') - } - if s.Queue { - buffer.WriteRune('q') - } - if s.File { - buffer.WriteRune('f') - } - return buffer.String() -} - -// Parse initializes the AccountSASServices' fields from a string. -/*func parseAccountServices(str string) (AccountServices, error) { - s := AccountServices{} // Clear out the flags - for _, r := range str { - switch r { - case 'b': - s.Blob = true - case 'q': - s.Queue = true - case 'f': - s.File = true - default: - return AccountServices{}, fmt.Errorf("invalid service character: '%v'", r) - } - } - return s, nil -}*/ - // AccountResourceTypes type simplifies creating the resource types string for an Azure Storage Account SAS. -// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's ResourceTypes field. +// Initialize an instance of this type and then call its String method to set AccountSignatureValues' ResourceTypes field. type AccountResourceTypes struct { Service, Container, Object bool } // String produces the SAS resource types string for an Azure Storage account. -// Call this method to set AccountSASSignatureValues's ResourceTypes field. +// Call this method to set AccountSignatureValues' ResourceTypes field. 
func (rt *AccountResourceTypes) String() string { var buffer bytes.Buffer if rt.Service { @@ -297,9 +210,9 @@ func (rt *AccountResourceTypes) String() string { return buffer.String() } -// Parse initializes the AccountResourceTypes's fields from a string. -/*func parseAccountResourceTypes(s string) (AccountResourceTypes, error) { - rt := AccountResourceTypes{} // Clear out the flags +// parseAccountResourceTypes initializes the AccountResourceTypes' fields from a string. +func parseAccountResourceTypes(s string) (AccountResourceTypes, error) { + rt := AccountResourceTypes{} for _, r := range s { switch r { case 's': @@ -309,8 +222,8 @@ func (rt *AccountResourceTypes) String() string { case 'o': rt.Object = true default: - return AccountResourceTypes{}, fmt.Errorf("invalid resource type: '%v'", r) + return AccountResourceTypes{}, fmt.Errorf("invalid resource type character: '%v'", r) } } return rt, nil -}*/ +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/query_params.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/query_params.go index 1a9c8c12d17b0..4c23208e2e14a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/query_params.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/query_params.go @@ -23,7 +23,7 @@ const ( var ( // Version is the default version encoded in the SAS token. - Version = "2020-02-10" + Version = "2021-12-02" ) // TimeFormats ISO 8601 format. @@ -34,10 +34,10 @@ var timeFormats = []string{"2006-01-02T15:04:05.0000000Z", TimeFormat, "2006-01- type Protocol string const ( - // ProtocolHTTPS can be specified for a SAS protocol + // ProtocolHTTPS can be specified for a SAS protocol. ProtocolHTTPS Protocol = "https" - // ProtocolHTTPSandHTTP can be specified for a SAS protocol + // ProtocolHTTPSandHTTP can be specified for a SAS protocol. ProtocolHTTPSandHTTP Protocol = "https,http" ) @@ -116,169 +116,175 @@ func (ipr *IPRange) String() string { // This type defines the components used by all Azure Storage resources (Containers, Blobs, Files, & Queues). type QueryParameters struct { // All members are immutable or values so copies of this struct are goroutine-safe. 
- version string `param:"sv"` - services string `param:"ss"` - resourceTypes string `param:"srt"` - protocol Protocol `param:"spr"` - startTime time.Time `param:"st"` - expiryTime time.Time `param:"se"` - snapshotTime time.Time `param:"snapshot"` - ipRange IPRange `param:"sip"` - identifier string `param:"si"` - resource string `param:"sr"` - permissions string `param:"sp"` - signature string `param:"sig"` - cacheControl string `param:"rscc"` - contentDisposition string `param:"rscd"` - contentEncoding string `param:"rsce"` - contentLanguage string `param:"rscl"` - contentType string `param:"rsct"` - signedOID string `param:"skoid"` - signedTID string `param:"sktid"` - signedStart time.Time `param:"skt"` - signedService string `param:"sks"` - signedExpiry time.Time `param:"ske"` - signedVersion string `param:"skv"` - signedDirectoryDepth string `param:"sdd"` - preauthorizedAgentObjectID string `param:"saoid"` - agentObjectID string `param:"suoid"` - correlationID string `param:"scid"` + version string `param:"sv"` + services string `param:"ss"` + resourceTypes string `param:"srt"` + protocol Protocol `param:"spr"` + startTime time.Time `param:"st"` + expiryTime time.Time `param:"se"` + snapshotTime time.Time `param:"snapshot"` + ipRange IPRange `param:"sip"` + identifier string `param:"si"` + resource string `param:"sr"` + permissions string `param:"sp"` + signature string `param:"sig"` + cacheControl string `param:"rscc"` + contentDisposition string `param:"rscd"` + contentEncoding string `param:"rsce"` + contentLanguage string `param:"rscl"` + contentType string `param:"rsct"` + signedOID string `param:"skoid"` + signedTID string `param:"sktid"` + signedStart time.Time `param:"skt"` + signedService string `param:"sks"` + signedExpiry time.Time `param:"ske"` + signedVersion string `param:"skv"` + signedDirectoryDepth string `param:"sdd"` + authorizedObjectID string `param:"saoid"` + unauthorizedObjectID string `param:"suoid"` + correlationID string `param:"scid"` + encryptionScope string `param:"ses"` // private member used for startTime and expiryTime formatting. stTimeFormat string seTimeFormat string } -// PreauthorizedAgentObjectID returns preauthorizedAgentObjectID -func (p *QueryParameters) PreauthorizedAgentObjectID() string { - return p.preauthorizedAgentObjectID +// AuthorizedObjectID returns authorizedObjectID. +func (p *QueryParameters) AuthorizedObjectID() string { + return p.authorizedObjectID } -// AgentObjectID returns agentObjectID -func (p *QueryParameters) AgentObjectID() string { - return p.agentObjectID +// UnauthorizedObjectID returns unauthorizedObjectID. +func (p *QueryParameters) UnauthorizedObjectID() string { + return p.unauthorizedObjectID } -// SignedCorrelationID returns signedCorrelationID +// SignedCorrelationID returns signedCorrelationID. func (p *QueryParameters) SignedCorrelationID() string { return p.correlationID } -// SignedOID returns signedOID +// EncryptionScope returns encryptionScope +func (p *QueryParameters) EncryptionScope() string { + return p.encryptionScope +} + +// SignedOID returns signedOID. func (p *QueryParameters) SignedOID() string { return p.signedOID } -// SignedTID returns signedTID +// SignedTID returns signedTID. func (p *QueryParameters) SignedTID() string { return p.signedTID } -// SignedStart returns signedStart +// SignedStart returns signedStart. func (p *QueryParameters) SignedStart() time.Time { return p.signedStart } -// SignedExpiry returns signedExpiry +// SignedExpiry returns signedExpiry. 
func (p *QueryParameters) SignedExpiry() time.Time { return p.signedExpiry } -// SignedService returns signedService +// SignedService returns signedService. func (p *QueryParameters) SignedService() string { return p.signedService } -// SignedVersion returns signedVersion +// SignedVersion returns signedVersion. func (p *QueryParameters) SignedVersion() string { return p.signedVersion } -// SnapshotTime returns snapshotTime +// SnapshotTime returns snapshotTime. func (p *QueryParameters) SnapshotTime() time.Time { return p.snapshotTime } -// Version returns version +// Version returns version. func (p *QueryParameters) Version() string { return p.version } -// Services returns services +// Services returns services. func (p *QueryParameters) Services() string { return p.services } -// ResourceTypes returns resourceTypes +// ResourceTypes returns resourceTypes. func (p *QueryParameters) ResourceTypes() string { return p.resourceTypes } -// Protocol returns protocol +// Protocol returns protocol. func (p *QueryParameters) Protocol() Protocol { return p.protocol } -// StartTime returns startTime +// StartTime returns startTime. func (p *QueryParameters) StartTime() time.Time { return p.startTime } -// ExpiryTime returns expiryTime +// ExpiryTime returns expiryTime. func (p *QueryParameters) ExpiryTime() time.Time { return p.expiryTime } -// IPRange returns ipRange +// IPRange returns ipRange. func (p *QueryParameters) IPRange() IPRange { return p.ipRange } -// Identifier returns identifier +// Identifier returns identifier. func (p *QueryParameters) Identifier() string { return p.identifier } -// Resource returns resource +// Resource returns resource. func (p *QueryParameters) Resource() string { return p.resource } -// Permissions returns permissions +// Permissions returns permissions. func (p *QueryParameters) Permissions() string { return p.permissions } -// Signature returns signature +// Signature returns signature. func (p *QueryParameters) Signature() string { return p.signature } -// CacheControl returns cacheControl +// CacheControl returns cacheControl. func (p *QueryParameters) CacheControl() string { return p.cacheControl } -// ContentDisposition returns contentDisposition +// ContentDisposition returns contentDisposition. func (p *QueryParameters) ContentDisposition() string { return p.contentDisposition } -// ContentEncoding returns contentEncoding +// ContentEncoding returns contentEncoding. func (p *QueryParameters) ContentEncoding() string { return p.contentEncoding } -// ContentLanguage returns contentLanguage +// ContentLanguage returns contentLanguage. func (p *QueryParameters) ContentLanguage() string { return p.contentLanguage } -// ContentType returns sontentType +// ContentType returns contentType. func (p *QueryParameters) ContentType() string { return p.contentType } -// SignedDirectoryDepth returns signedDirectoryDepth +// SignedDirectoryDepth returns signedDirectoryDepth. 
func (p *QueryParameters) SignedDirectoryDepth() string { return p.signedDirectoryDepth } @@ -346,15 +352,18 @@ func (p *QueryParameters) Encode() string { if p.signedDirectoryDepth != "" { v.Add("sdd", p.signedDirectoryDepth) } - if p.preauthorizedAgentObjectID != "" { - v.Add("saoid", p.preauthorizedAgentObjectID) + if p.authorizedObjectID != "" { + v.Add("saoid", p.authorizedObjectID) } - if p.agentObjectID != "" { - v.Add("suoid", p.agentObjectID) + if p.unauthorizedObjectID != "" { + v.Add("suoid", p.unauthorizedObjectID) } if p.correlationID != "" { v.Add("scid", p.correlationID) } + if p.encryptionScope != "" { + v.Add("ses", p.encryptionScope) + } return v.Encode() } @@ -424,11 +433,13 @@ func NewQueryParameters(values url.Values, deleteSASParametersFromValues bool) Q case "sdd": p.signedDirectoryDepth = val case "saoid": - p.preauthorizedAgentObjectID = val + p.authorizedObjectID = val case "suoid": - p.agentObjectID = val + p.unauthorizedObjectID = val case "scid": p.correlationID = val + case "ses": + p.encryptionScope = val default: isSASKey = false // We didn't recognize the query parameter } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/service.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/service.go index 98d853d4e9f89..45f730847d28f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/service.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/service.go @@ -8,6 +8,7 @@ package sas import ( "bytes" + "errors" "fmt" "strings" "time" @@ -16,28 +17,30 @@ import ( ) // BlobSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage container or blob. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/constructing-a-service-sas +// For more information on creating service sas, see https://docs.microsoft.com/rest/api/storageservices/constructing-a-service-sas +// For more information on creating user delegation sas, see https://docs.microsoft.com/rest/api/storageservices/create-user-delegation-sas type BlobSignatureValues struct { - Version string `param:"sv"` // If not specified, this defaults to Version - Protocol Protocol `param:"spr"` // See the Protocol* constants - StartTime time.Time `param:"st"` // Not specified if IsZero - ExpiryTime time.Time `param:"se"` // Not specified if IsZero - SnapshotTime time.Time - Permissions string `param:"sp"` // Create by initializing a ContainerSASPermissions or BlobSASPermissions and then call String() - IPRange IPRange `param:"sip"` - Identifier string `param:"si"` - ContainerName string - BlobName string // Use "" to create a Container SAS - Directory string // Not nil for a directory SAS (ie sr=d) - CacheControl string // rscc - ContentDisposition string // rscd - ContentEncoding string // rsce - ContentLanguage string // rscl - ContentType string // rsct - BlobVersion string // sr=bv - PreauthorizedAgentObjectId string - AgentObjectId string - CorrelationId string + Version string `param:"sv"` // If not specified, this defaults to Version + Protocol Protocol `param:"spr"` // See the Protocol* constants + StartTime time.Time `param:"st"` // Not specified if IsZero + ExpiryTime time.Time `param:"se"` // Not specified if IsZero + SnapshotTime time.Time + Permissions string `param:"sp"` // Create by initializing ContainerPermissions or BlobPermissions and then call String() + IPRange IPRange `param:"sip"` + Identifier string `param:"si"` + ContainerName string + BlobName string // 
Use "" to create a Container SAS + Directory string // Not nil for a directory SAS (ie sr=d) + CacheControl string // rscc + ContentDisposition string // rscd + ContentEncoding string // rsce + ContentLanguage string // rscl + ContentType string // rsct + BlobVersion string // sr=bv + AuthorizedObjectID string // saoid + UnauthorizedObjectID string // suoid + CorrelationID string // scid + EncryptionScope string `param:"ses"` } func getDirectoryDepth(path string) string { @@ -49,17 +52,11 @@ func getDirectoryDepth(path string) string { // SignWithSharedKey uses an account's SharedKeyCredential to sign this signature values to produce the proper SAS query parameters. func (v BlobSignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKeyCredential) (QueryParameters, error) { - if sharedKeyCredential == nil { - return QueryParameters{}, fmt.Errorf("cannot sign SAS query without Shared Key Credential") + if v.Identifier == "" && (v.ExpiryTime.IsZero() || v.Permissions == "") { + return QueryParameters{}, errors.New("service SAS is missing at least one of these: ExpiryTime or Permissions") } - //Make sure the permission characters are in the correct order - perms, err := parseBlobPermissions(v.Permissions) - if err != nil { - return QueryParameters{}, err - } - v.Permissions = perms.String() - + // Parse the resource resource := "c" if !v.SnapshotTime.IsZero() { resource = "bs" @@ -74,6 +71,21 @@ func (v BlobSignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKeyCre resource = "b" } + // make sure the permission characters are in the correct order + if resource == "c" { + perms, err := parseContainerPermissions(v.Permissions) + if err != nil { + return QueryParameters{}, err + } + v.Permissions = perms.String() + } else { + perms, err := parseBlobPermissions(v.Permissions) + if err != nil { + return QueryParameters{}, err + } + v.Permissions = perms.String() + } + if v.Version == "" { v.Version = Version } @@ -92,7 +104,8 @@ func (v BlobSignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKeyCre string(v.Protocol), v.Version, resource, - snapshotTime, // signed timestamp + snapshotTime, // signed timestamp + v.EncryptionScope, v.CacheControl, // rscc v.ContentDisposition, // rscd v.ContentEncoding, // rsce @@ -107,26 +120,27 @@ func (v BlobSignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKeyCre p := QueryParameters{ // Common SAS parameters - version: v.Version, - protocol: v.Protocol, - startTime: v.StartTime, - expiryTime: v.ExpiryTime, - permissions: v.Permissions, - ipRange: v.IPRange, + version: v.Version, + protocol: v.Protocol, + startTime: v.StartTime, + expiryTime: v.ExpiryTime, + permissions: v.Permissions, + ipRange: v.IPRange, + encryptionScope: v.EncryptionScope, // Container/Blob-specific SAS parameters - resource: resource, - identifier: v.Identifier, - cacheControl: v.CacheControl, - contentDisposition: v.ContentDisposition, - contentEncoding: v.ContentEncoding, - contentLanguage: v.ContentLanguage, - contentType: v.ContentType, - snapshotTime: v.SnapshotTime, - signedDirectoryDepth: getDirectoryDepth(v.Directory), - preauthorizedAgentObjectID: v.PreauthorizedAgentObjectId, - agentObjectID: v.AgentObjectId, - correlationID: v.CorrelationId, + resource: resource, + identifier: v.Identifier, + cacheControl: v.CacheControl, + contentDisposition: v.ContentDisposition, + contentEncoding: v.ContentEncoding, + contentLanguage: v.ContentLanguage, + contentType: v.ContentType, + snapshotTime: v.SnapshotTime, + signedDirectoryDepth: 
getDirectoryDepth(v.Directory), + authorizedObjectID: v.AuthorizedObjectID, + unauthorizedObjectID: v.UnauthorizedObjectID, + correlationID: v.CorrelationID, // Calculated SAS signature signature: signature, } @@ -140,13 +154,11 @@ func (v BlobSignatureValues) SignWithUserDelegation(userDelegationCredential *Us return QueryParameters{}, fmt.Errorf("cannot sign SAS query without User Delegation Key") } - //Make sure the permission characters are in the correct order - perms, err := parseBlobPermissions(v.Permissions) - if err != nil { - return QueryParameters{}, err + if v.ExpiryTime.IsZero() || v.Permissions == "" { + return QueryParameters{}, errors.New("user delegation SAS is missing at least one of these: ExpiryTime or Permissions") } - v.Permissions = perms.String() + // Parse the resource resource := "c" if !v.SnapshotTime.IsZero() { resource = "bs" @@ -160,6 +172,20 @@ func (v BlobSignatureValues) SignWithUserDelegation(userDelegationCredential *Us } else { resource = "b" } + // make sure the permission characters are in the correct order + if resource == "c" { + perms, err := parseContainerPermissions(v.Permissions) + if err != nil { + return QueryParameters{}, err + } + v.Permissions = perms.String() + } else { + perms, err := parseBlobPermissions(v.Permissions) + if err != nil { + return QueryParameters{}, err + } + v.Permissions = perms.String() + } if v.Version == "" { v.Version = Version @@ -169,32 +195,27 @@ func (v BlobSignatureValues) SignWithUserDelegation(userDelegationCredential *Us udk := exported.GetUDKParams(userDelegationCredential) udkStart, udkExpiry, _ := formatTimesForSigning(*udk.SignedStart, *udk.SignedExpiry, time.Time{}) - //I don't like this answer to combining the functions - //But because signedIdentifier and the user delegation key strings share a place, this is an _OK_ way to do it. 
- signedIdentifier := strings.Join([]string{ - *udk.SignedOID, - *udk.SignedTID, - udkStart, - udkExpiry, - *udk.SignedService, - *udk.SignedVersion, - v.PreauthorizedAgentObjectId, - v.AgentObjectId, - v.CorrelationId, - }, "\n") - // String to sign: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx stringToSign := strings.Join([]string{ v.Permissions, startTime, expiryTime, getCanonicalName(exported.GetAccountName(userDelegationCredential), v.ContainerName, v.BlobName, v.Directory), - signedIdentifier, + *udk.SignedOID, + *udk.SignedTID, + udkStart, + udkExpiry, + *udk.SignedService, + *udk.SignedVersion, + v.AuthorizedObjectID, + v.UnauthorizedObjectID, + v.CorrelationID, v.IPRange.String(), string(v.Protocol), v.Version, resource, - snapshotTime, // signed timestamp + snapshotTime, // signed timestamp + v.EncryptionScope, v.CacheControl, // rscc v.ContentDisposition, // rscd v.ContentEncoding, // rsce @@ -209,26 +230,27 @@ func (v BlobSignatureValues) SignWithUserDelegation(userDelegationCredential *Us p := QueryParameters{ // Common SAS parameters - version: v.Version, - protocol: v.Protocol, - startTime: v.StartTime, - expiryTime: v.ExpiryTime, - permissions: v.Permissions, - ipRange: v.IPRange, + version: v.Version, + protocol: v.Protocol, + startTime: v.StartTime, + expiryTime: v.ExpiryTime, + permissions: v.Permissions, + ipRange: v.IPRange, + encryptionScope: v.EncryptionScope, // Container/Blob-specific SAS parameters - resource: resource, - identifier: v.Identifier, - cacheControl: v.CacheControl, - contentDisposition: v.ContentDisposition, - contentEncoding: v.ContentEncoding, - contentLanguage: v.ContentLanguage, - contentType: v.ContentType, - snapshotTime: v.SnapshotTime, - signedDirectoryDepth: getDirectoryDepth(v.Directory), - preauthorizedAgentObjectID: v.PreauthorizedAgentObjectId, - agentObjectID: v.AgentObjectId, - correlationID: v.CorrelationId, + resource: resource, + identifier: v.Identifier, + cacheControl: v.CacheControl, + contentDisposition: v.ContentDisposition, + contentEncoding: v.ContentEncoding, + contentLanguage: v.ContentLanguage, + contentType: v.ContentType, + snapshotTime: v.SnapshotTime, + signedDirectoryDepth: getDirectoryDepth(v.Directory), + authorizedObjectID: v.AuthorizedObjectID, + unauthorizedObjectID: v.UnauthorizedObjectID, + correlationID: v.CorrelationID, // Calculated SAS signature signature: signature, } @@ -258,15 +280,15 @@ func getCanonicalName(account string, containerName string, blobName string, dir } // ContainerPermissions type simplifies creating the permissions string for an Azure Storage container SAS. -// Initialize an instance of this type and then call its String method to set BlobSASSignatureValues's Permissions field. +// Initialize an instance of this type and then call its String method to set BlobSignatureValues' Permissions field. // All permissions descriptions can be found here: https://docs.microsoft.com/en-us/rest/api/storageservices/create-service-sas#permissions-for-a-directory-container-or-blob type ContainerPermissions struct { - Read, Add, Create, Write, Delete, DeletePreviousVersion, List, Tag bool - Execute, ModifyOwnership, ModifyPermissions bool // Hierarchical Namespace only + Read, Add, Create, Write, Delete, DeletePreviousVersion, List, Tag, FilterByTags, Move, SetImmutabilityPolicy bool + Execute, ModifyOwnership, ModifyPermissions bool // Meant for hierarchical namespace accounts } // String produces the SAS permissions string for an Azure Storage container. 
-// Call this method to set BlobSASSignatureValues's Permissions field. +// Call this method to set BlobSignatureValues' Permissions field. func (p *ContainerPermissions) String() string { var b bytes.Buffer if p.Read { @@ -293,6 +315,12 @@ func (p *ContainerPermissions) String() string { if p.Tag { b.WriteRune('t') } + if p.FilterByTags { + b.WriteRune('f') + } + if p.Move { + b.WriteRune('m') + } if p.Execute { b.WriteRune('e') } @@ -302,11 +330,14 @@ func (p *ContainerPermissions) String() string { if p.ModifyPermissions { b.WriteRune('p') } + if p.SetImmutabilityPolicy { + b.WriteRune('i') + } return b.String() } -// Parse initializes the ContainerSASPermissions' fields from a string. -/*func parseContainerPermissions(s string) (ContainerPermissions, error) { +// Parse initializes ContainerPermissions' fields from a string. +func parseContainerPermissions(s string) (ContainerPermissions, error) { p := ContainerPermissions{} // Clear the flags for _, r := range s { switch r { @@ -326,27 +357,33 @@ func (p *ContainerPermissions) String() string { p.List = true case 't': p.Tag = true + case 'f': + p.FilterByTags = true + case 'm': + p.Move = true case 'e': p.Execute = true case 'o': p.ModifyOwnership = true case 'p': p.ModifyPermissions = true + case 'i': + p.SetImmutabilityPolicy = true default: return ContainerPermissions{}, fmt.Errorf("invalid permission: '%v'", r) } } return p, nil -}*/ +} // BlobPermissions type simplifies creating the permissions string for an Azure Storage blob SAS. -// Initialize an instance of this type and then call its String method to set BlobSASSignatureValues's Permissions field. +// Initialize an instance of this type and then call its String method to set BlobSignatureValues' Permissions field. type BlobPermissions struct { - Read, Add, Create, Write, Delete, DeletePreviousVersion, Tag, List, Move, Execute, Ownership, Permissions bool + Read, Add, Create, Write, Delete, DeletePreviousVersion, PermanentDelete, List, Tag, Move, Execute, Ownership, Permissions, SetImmutabilityPolicy bool } // String produces the SAS permissions string for an Azure Storage blob. -// Call this method to set BlobSignatureValues's Permissions field. +// Call this method to set BlobSignatureValues' Permissions field. func (p *BlobPermissions) String() string { var b bytes.Buffer if p.Read { @@ -367,12 +404,15 @@ func (p *BlobPermissions) String() string { if p.DeletePreviousVersion { b.WriteRune('x') } - if p.Tag { - b.WriteRune('t') + if p.PermanentDelete { + b.WriteRune('y') } if p.List { b.WriteRune('l') } + if p.Tag { + b.WriteRune('t') + } if p.Move { b.WriteRune('m') } @@ -385,10 +425,13 @@ func (p *BlobPermissions) String() string { if p.Permissions { b.WriteRune('p') } + if p.SetImmutabilityPolicy { + b.WriteRune('i') + } return b.String() } -// Parse initializes the BlobSASPermissions's fields from a string. +// Parse initializes BlobPermissions' fields from a string. 
func parseBlobPermissions(s string) (BlobPermissions, error) { p := BlobPermissions{} // Clear the flags for _, r := range s { @@ -405,10 +448,12 @@ func parseBlobPermissions(s string) (BlobPermissions, error) { p.Delete = true case 'x': p.DeletePreviousVersion = true - case 't': - p.Tag = true + case 'y': + p.PermanentDelete = true case 'l': p.List = true + case 't': + p.Tag = true case 'm': p.Move = true case 'e': @@ -417,6 +462,8 @@ func parseBlobPermissions(s string) (BlobPermissions, error) { p.Ownership = true case 'p': p.Permissions = true + case 'i': + p.SetImmutabilityPolicy = true default: return BlobPermissions{}, fmt.Errorf("invalid permission: '%v'", r) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/batch_builder.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/batch_builder.go new file mode 100644 index 0000000000000..924fd1081feea --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/batch_builder.go @@ -0,0 +1,94 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package service + +import ( + "context" + "fmt" + "net/url" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" +) + +// BatchBuilder is used for creating the batch operations list. It contains the list of either delete or set tier sub-requests. +// NOTE: All sub-requests in the batch must be of the same type, either delete or set tier. +type BatchBuilder struct { + endpoint string + authPolicy policy.Policy + subRequests []*policy.Request + operationType *exported.BlobBatchOperationType +} + +func (bb *BatchBuilder) checkOperationType(operationType exported.BlobBatchOperationType) error { + if bb.operationType == nil { + bb.operationType = &operationType + return nil + } + if *bb.operationType != operationType { + return fmt.Errorf("BlobBatch only supports one operation type per batch and is already being used for %s operations", *bb.operationType) + } + return nil +} + +// Delete operation is used to add delete sub-request to the batch builder. +func (bb *BatchBuilder) Delete(containerName string, blobName string, options *BatchDeleteOptions) error { + err := bb.checkOperationType(exported.BatchDeleteOperationType) + if err != nil { + return err + } + + blobName = url.PathEscape(blobName) + blobURL := runtime.JoinPaths(bb.endpoint, containerName, blobName) + + blobClient, err := blob.NewClientWithNoCredential(blobURL, nil) + if err != nil { + return err + } + + deleteOptions, leaseInfo, accessConditions := options.format() + req, err := getGeneratedBlobClient(blobClient).DeleteCreateRequest(context.TODO(), deleteOptions, leaseInfo, accessConditions) + if err != nil { + return err + } + + // remove x-ms-version header + exported.UpdateSubRequestHeaders(req) + + bb.subRequests = append(bb.subRequests, req) + return nil +} + +// SetTier operation is used to add set tier sub-request to the batch builder. 
+func (bb *BatchBuilder) SetTier(containerName string, blobName string, accessTier blob.AccessTier, options *BatchSetTierOptions) error { + err := bb.checkOperationType(exported.BatchSetTierOperationType) + if err != nil { + return err + } + + blobName = url.PathEscape(blobName) + blobURL := runtime.JoinPaths(bb.endpoint, containerName, blobName) + + blobClient, err := blob.NewClientWithNoCredential(blobURL, nil) + if err != nil { + return err + } + + setTierOptions, leaseInfo, accessConditions := options.format() + req, err := getGeneratedBlobClient(blobClient).SetTierCreateRequest(context.TODO(), accessTier, setTierOptions, leaseInfo, accessConditions) + if err != nil { + return err + } + + // remove x-ms-version header + exported.UpdateSubRequestHeaders(req) + + bb.subRequests = append(bb.subRequests, req) + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/client.go index 724d92b91c932..ccf4159c240f3 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/client.go @@ -7,8 +7,13 @@ package service import ( + "bytes" "context" "errors" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base" "net/http" "strings" "time" @@ -18,7 +23,6 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" @@ -26,9 +30,7 @@ import ( ) // ClientOptions contains the optional parameters when creating a Client. -type ClientOptions struct { - azcore.ClientOptions -} +type ClientOptions base.ClientOptions // Client represents a URL to the Azure Blob Storage service allowing you to manipulate blob containers. 
type Client base.Client[generated.ServiceClient] @@ -38,12 +40,16 @@ type Client base.Client[generated.ServiceClient] // - cred - an Azure AD credential, typically obtained via the azidentity module // - options - client options; pass nil to accept the default values func NewClient(serviceURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { - authPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil) + audience := base.GetAudience((*base.ClientOptions)(options)) + authPolicy := shared.NewStorageChallengePolicy(cred, audience) conOptions := shared.GetClientOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - return (*Client)(base.NewServiceClient(serviceURL, pl, nil)), nil + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + return (*Client)(base.NewServiceClient(serviceURL, azClient, &cred, (*base.ClientOptions)(conOptions))), nil } // NewClientWithNoCredential creates an instance of Client with the specified values. @@ -52,9 +58,12 @@ func NewClient(serviceURL string, cred azcore.TokenCredential, options *ClientOp // - options - client options; pass nil to accept the default values func NewClientWithNoCredential(serviceURL string, options *ClientOptions) (*Client, error) { conOptions := shared.GetClientOptions(options) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) - return (*Client)(base.NewServiceClient(serviceURL, pl, nil)), nil + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + return (*Client)(base.NewServiceClient(serviceURL, azClient, nil, (*base.ClientOptions)(conOptions))), nil } // NewClientWithSharedKeyCredential creates an instance of Client with the specified values. @@ -64,10 +73,14 @@ func NewClientWithNoCredential(serviceURL string, options *ClientOptions) (*Clie func NewClientWithSharedKeyCredential(serviceURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) { authPolicy := exported.NewSharedKeyCredPolicy(cred) conOptions := shared.GetClientOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} + + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } - return (*Client)(base.NewServiceClient(serviceURL, pl, cred)), nil + return (*Client)(base.NewServiceClient(serviceURL, azClient, cred, (*base.ClientOptions)(conOptions))), nil } // NewClientFromConnectionString creates an instance of Client with the specified values. 
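Construction from calling code keeps the same shape even though the pipeline is now built through azcore.NewClient and, for token credentials, the storage challenge policy. A brief sketch assuming azidentity for the Azure AD path; the account URL, name, and key are placeholders:

package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service"
)

func main() {
	const accountURL = "https://<account>.blob.core.windows.net/"

	// Azure AD / token credential path; the new storage challenge policy is wired in internally.
	tokenCred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := service.NewClient(accountURL, tokenCred, nil)
	if err != nil {
		log.Fatal(err)
	}
	_ = svc

	// Shared key path; name and key are placeholders.
	skc, err := service.NewSharedKeyCredential("<account>", "<key>")
	if err != nil {
		log.Fatal(err)
	}
	svc2, err := service.NewClientWithSharedKeyCredential(accountURL, skc, nil)
	if err != nil {
		log.Fatal(err)
	}
	_ = svc2
}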
@@ -115,19 +128,29 @@ func (s *Client) sharedKey() *SharedKeyCredential { return base.SharedKey((*base.Client[generated.ServiceClient])(s)) } +func (s *Client) credential() any { + return base.Credential((*base.Client[generated.ServiceClient])(s)) +} + +// helper method to return the generated.BlobClient which is used for creating the sub-requests +func getGeneratedBlobClient(b *blob.Client) *generated.BlobClient { + return base.InnerClient((*base.Client[generated.BlobClient])(b)) +} + +func (s *Client) getClientOptions() *base.ClientOptions { + return base.GetClientOptions((*base.Client[generated.ServiceClient])(s)) +} + // URL returns the URL endpoint used by the Client object. func (s *Client) URL() string { return s.generated().Endpoint() } -// NewContainerClient creates a new ContainerClient object by concatenating containerName to the end of -// Client's URL. The new ContainerClient uses the same request policy pipeline as the Client. -// To change the pipeline, create the ContainerClient and then call its WithPipeline method passing in the -// desired pipeline object. Or, call this package's NewContainerClient instead of calling this object's -// NewContainerClient method. +// NewContainerClient creates a new container.Client object by concatenating containerName to the end of +// this Client's URL. The new container.Client uses the same request policy pipeline as the Client. func (s *Client) NewContainerClient(containerName string) *container.Client { containerURL := runtime.JoinPaths(s.generated().Endpoint(), containerName) - return (*container.Client)(base.NewContainerClient(containerURL, s.generated().Pipeline(), s.sharedKey())) + return (*container.Client)(base.NewContainerClient(containerURL, s.generated().InternalClient().WithClientName(exported.ModuleName), s.credential(), s.getClientOptions())) } // CreateContainer is a lifecycle method to creates a new container under the specified account. @@ -157,6 +180,7 @@ func (s *Client) RestoreContainer(ctx context.Context, deletedContainerName stri } // GetAccountInfo provides account level information +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/get-account-information?tabs=shared-access-signatures. 
func (s *Client) GetAccountInfo(ctx context.Context, o *GetAccountInfoOptions) (GetAccountInfoResponse, error) { getAccountInfoOptions := o.format() resp, err := s.generated().GetAccountInfo(ctx, getAccountInfoOptions) @@ -175,6 +199,9 @@ func (s *Client) NewListContainersPager(o *ListContainersOptions) *runtime.Pager if o.Include.Metadata { listOptions.Include = append(listOptions.Include, generated.ListContainersIncludeTypeMetadata) } + if o.Include.System { + listOptions.Include = append(listOptions.Include, generated.ListContainersIncludeTypeSystem) + } listOptions.Marker = o.Marker listOptions.Maxresults = o.MaxResults listOptions.Prefix = o.Prefix @@ -189,13 +216,13 @@ func (s *Client) NewListContainersPager(o *ListContainersOptions) *runtime.Pager if page == nil { req, err = s.generated().ListContainersSegmentCreateRequest(ctx, &listOptions) } else { - listOptions.Marker = page.Marker + listOptions.Marker = page.NextMarker req, err = s.generated().ListContainersSegmentCreateRequest(ctx, &listOptions) } if err != nil { return ListContainersResponse{}, err } - resp, err := s.generated().Pipeline().Do(req) + resp, err := s.generated().InternalClient().Pipeline().Do(req) if err != nil { return ListContainersResponse{}, err } @@ -246,19 +273,17 @@ func (s *Client) GetStatistics(ctx context.Context, o *GetStatisticsOptions) (Ge // GetSASURL is a convenience method for generating a SAS token for the currently pointed at account. // It can only be used if the credential supplied during creation was a SharedKeyCredential. -// This validity can be checked with CanGetAccountSASToken(). -func (s *Client) GetSASURL(resources sas.AccountResourceTypes, permissions sas.AccountPermissions, services sas.AccountServices, start time.Time, expiry time.Time) (string, error) { +func (s *Client) GetSASURL(resources sas.AccountResourceTypes, permissions sas.AccountPermissions, expiry time.Time, o *GetSASURLOptions) (string, error) { if s.sharedKey() == nil { - return "", errors.New("SAS can only be signed with a SharedKeyCredential") + return "", bloberror.MissingSharedKeyCredential } - + st := o.format() qps, err := sas.AccountSignatureValues{ Version: sas.Version, Protocol: sas.ProtocolHTTPS, Permissions: permissions.String(), - Services: services.String(), ResourceTypes: resources.String(), - StartTime: start.UTC(), + StartTime: st, ExpiryTime: expiry.UTC(), }.SignWithSharedKey(s.sharedKey()) if err != nil { @@ -280,8 +305,73 @@ func (s *Client) GetSASURL(resources sas.AccountResourceTypes, permissions sas.A // https://docs.microsoft.com/en-us/rest/api/storageservices/find-blobs-by-tags // eg. "dog='germanshepherd' and penguin='emperorpenguin'" // To specify a container, eg. "@container=’containerName’ and Name = ‘C’" -func (s *Client) FilterBlobs(ctx context.Context, o *FilterBlobsOptions) (FilterBlobsResponse, error) { +func (s *Client) FilterBlobs(ctx context.Context, where string, o *FilterBlobsOptions) (FilterBlobsResponse, error) { serviceFilterBlobsOptions := o.format() - resp, err := s.generated().FilterBlobs(ctx, serviceFilterBlobsOptions) + resp, err := s.generated().FilterBlobs(ctx, where, serviceFilterBlobsOptions) return resp, err } + +// NewBatchBuilder creates an instance of BatchBuilder using the same auth policy as the client. +// BatchBuilder is used to build the batch consisting of either delete or set tier sub-requests. +// All sub-requests in the batch must be of the same type, either delete or set tier. 
+// NOTE: Service level Blob Batch operation is supported only when the Client was created using SharedKeyCredential and Account SAS. +func (s *Client) NewBatchBuilder() (*BatchBuilder, error) { + var authPolicy policy.Policy + + switch cred := s.credential().(type) { + case *azcore.TokenCredential: + authPolicy = shared.NewStorageChallengePolicy(*cred, base.GetAudience(s.getClientOptions())) + case *SharedKeyCredential: + authPolicy = exported.NewSharedKeyCredPolicy(cred) + case nil: + // for authentication using SAS + authPolicy = nil + default: + return nil, fmt.Errorf("unrecognised authentication type %T", cred) + } + + return &BatchBuilder{ + endpoint: s.URL(), + authPolicy: authPolicy, + }, nil +} + +// SubmitBatch operation allows multiple API calls to be embedded into a single HTTP request. +// It builds the request body using the BatchBuilder object passed. +// BatchBuilder contains the list of operations to be submitted. It supports up to 256 sub-requests in a single batch. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/blob-batch. +func (s *Client) SubmitBatch(ctx context.Context, bb *BatchBuilder, options *SubmitBatchOptions) (SubmitBatchResponse, error) { + if bb == nil || len(bb.subRequests) == 0 { + return SubmitBatchResponse{}, errors.New("batch builder is empty") + } + + // create the request body + batchReq, batchID, err := exported.CreateBatchRequest(&exported.BlobBatchBuilder{ + AuthPolicy: bb.authPolicy, + SubRequests: bb.subRequests, + }) + if err != nil { + return SubmitBatchResponse{}, err + } + + reader := bytes.NewReader(batchReq) + rsc := streaming.NopCloser(reader) + multipartContentType := "multipart/mixed; boundary=" + batchID + + resp, err := s.generated().SubmitBatch(ctx, int64(len(batchReq)), multipartContentType, rsc, options.format()) + if err != nil { + return SubmitBatchResponse{}, err + } + + batchResponses, err := exported.ParseBlobBatchResponse(resp.Body, resp.ContentType, bb.subRequests) + if err != nil { + return SubmitBatchResponse{}, err + } + + return SubmitBatchResponse{ + Responses: batchResponses, + ContentType: resp.ContentType, + RequestID: resp.RequestID, + Version: resp.Version, + }, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/models.go index b0354cd90e3e2..b70724d797439 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/models.go @@ -7,9 +7,12 @@ package service import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + "time" ) // SharedKeyCredential contains an account's name and its primary or secondary key. @@ -30,7 +33,7 @@ type UserDelegationKey = generated.UserDelegationKey // KeyInfo contains KeyInfo struct. type KeyInfo = generated.KeyInfo -// GetUserDelegationCredentialOptions contains optional parameters for Service.GetUserDelegationKey method +// GetUserDelegationCredentialOptions contains optional parameters for Service.GetUserDelegationKey method. 
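NewBatchBuilder and SubmitBatch above are the new public surface for blob batching. A hedged usage sketch; the container and blob names are placeholders, and only the count of sub-responses is inspected because the per-item BatchResponseItem fields are defined elsewhere:

package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service"
)

// deleteBlobsInBatch deletes a handful of blobs in a single round trip. svc is assumed
// to have been created with a shared key credential or account SAS, per the NOTE above.
func deleteBlobsInBatch(ctx context.Context, svc *service.Client) error {
	bb, err := svc.NewBatchBuilder()
	if err != nil {
		return err
	}
	// All sub-requests must be of one kind; mixing Delete and SetTier returns an error.
	for _, name := range []string{"blob1", "blob2", "blob3"} {
		if err := bb.Delete("mycontainer", name, nil); err != nil {
			return err
		}
	}
	resp, err := svc.SubmitBatch(ctx, bb, nil)
	if err != nil {
		return err
	}
	fmt.Printf("batch returned %d sub-responses\n", len(resp.Responses))
	return nil
}

// setTierInBatch shows the other sub-request kind on a separate builder.
func setTierInBatch(ctx context.Context, svc *service.Client) error {
	bb, err := svc.NewBatchBuilder()
	if err != nil {
		return err
	}
	if err := bb.SetTier("mycontainer", "blob1", blob.AccessTierCool, nil); err != nil {
		return err
	}
	_, err = svc.SubmitBatch(ctx, bb, nil)
	return err
}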
type GetUserDelegationCredentialOptions struct { // placeholder for future options } @@ -42,11 +45,20 @@ func (o *GetUserDelegationCredentialOptions) format() *generated.ServiceClientGe // AccessConditions identifies container-specific access conditions which you optionally set. type AccessConditions = exported.ContainerAccessConditions -// CpkInfo contains a group of parameters for the BlobClient.Download method. -type CpkInfo = generated.CpkInfo +// BlobTag - a key/value pair on a blob +type BlobTag = generated.BlobTag -// CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. -type CpkScopeInfo = generated.CpkScopeInfo +// ContainerItem - An Azure Storage container returned from method Client.ListContainersSegment. +type ContainerItem = generated.ContainerItem + +// ContainerProperties - Properties of a container +type ContainerProperties = generated.ContainerProperties + +// CPKInfo contains a group of parameters for the BlobClient.Download method. +type CPKInfo = generated.CPKInfo + +// CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +type CPKScopeInfo = generated.CPKScopeInfo // CreateContainerOptions contains the optional parameters for the container.Client.Create method. type CreateContainerOptions = container.CreateOptions @@ -57,22 +69,34 @@ type DeleteContainerOptions = container.DeleteOptions // RestoreContainerOptions contains the optional parameters for the container.Client.Restore method. type RestoreContainerOptions = container.RestoreOptions -// CorsRule - CORS is an HTTP feature that enables a web application running under one domain to access resources in another +// CORSRule - CORS is an HTTP feature that enables a web application running under one domain to access resources in another // domain. Web browsers implement a security restriction known as same-origin policy that // prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain (the origin -// domain) to call APIs in another domain -type CorsRule = generated.CorsRule +// domain) to call APIs in another domain. +type CORSRule = generated.CORSRule + +// FilterBlobSegment - The result of a Filter Blobs API call. +type FilterBlobSegment = generated.FilterBlobSegment + +// BlobTags - Blob tags +type BlobTags = generated.BlobTags + +// FilterBlobItem - Blob info returned from method Client.FilterBlobs. +type FilterBlobItem = generated.FilterBlobItem + +// GeoReplication - Geo-Replication information for the Secondary Storage Service. +type GeoReplication = generated.GeoReplication -// RetentionPolicy - the retention policy which determines how long the associated data should persist +// RetentionPolicy - the retention policy which determines how long the associated data should persist. type RetentionPolicy = generated.RetentionPolicy -// Metrics - a summary of request statistics grouped by API in hour or minute aggregates for blobs +// Metrics - a summary of request statistics grouped by API in hour or minute aggregates for blobs. type Metrics = generated.Metrics // Logging - Azure Analytics Logging settings. type Logging = generated.Logging -// StaticWebsite - The properties that enable an account to host a static website +// StaticWebsite - The properties that enable an account to host a static website. type StaticWebsite = generated.StaticWebsite // StorageServiceProperties - Storage Service Properties. 
@@ -105,7 +129,7 @@ func (o *GetPropertiesOptions) format() *generated.ServiceClientGetPropertiesOpt // --------------------------------------------------------------------------------------------------------------------- -// ListContainersOptions provides set of configurations for ListContainers operation +// ListContainersOptions provides set of configurations for ListContainers operation. type ListContainersOptions struct { Include ListContainersInclude @@ -132,6 +156,9 @@ type ListContainersInclude struct { // Tells the service whether to return soft-deleted containers. Deleted bool + + // Tells the service whether to return system containers. + System bool } // --------------------------------------------------------------------------------------------------------------------- @@ -139,25 +166,28 @@ type ListContainersInclude struct { // SetPropertiesOptions provides set of options for Client.SetProperties type SetPropertiesOptions struct { // The set of CORS rules. - Cors []*CorsRule + CORS []*CORSRule // The default version to use for requests to the Blob service if an incoming request's version is not specified. Possible - // values include version 2008-10-27 and all more recent versions + // values include version 2008-10-27 and all more recent versions. DefaultServiceVersion *string - // the retention policy which determines how long the associated data should persist + // the retention policy which determines how long the associated data should persist. DeleteRetentionPolicy *RetentionPolicy // a summary of request statistics grouped by API in hour or minute aggregates for blobs + // If version is not set - we default to "1.0" HourMetrics *Metrics // Azure Analytics Logging settings. + // If version is not set - we default to "1.0" Logging *Logging // a summary of request statistics grouped by API in hour or minute aggregates for blobs + // If version is not set - we default to "1.0" MinuteMetrics *Metrics - // The properties that enable an account to host a static website + // The properties that enable an account to host a static website. StaticWebsite *StaticWebsite } @@ -166,8 +196,45 @@ func (o *SetPropertiesOptions) format() (generated.StorageServiceProperties, *ge return generated.StorageServiceProperties{}, nil } + defaultVersion := to.Ptr[string]("1.0") + defaultAge := to.Ptr[int32](0) + emptyStr := to.Ptr[string]("") + + if o.CORS != nil { + for i := 0; i < len(o.CORS); i++ { + if o.CORS[i].AllowedHeaders == nil { + o.CORS[i].AllowedHeaders = emptyStr + } + if o.CORS[i].ExposedHeaders == nil { + o.CORS[i].ExposedHeaders = emptyStr + } + if o.CORS[i].MaxAgeInSeconds == nil { + o.CORS[i].MaxAgeInSeconds = defaultAge + } + } + } + + if o.HourMetrics != nil { + if o.HourMetrics.Version == nil { + o.HourMetrics.Version = defaultVersion + } + } + + if o.Logging != nil { + if o.Logging.Version == nil { + o.Logging.Version = defaultVersion + } + } + + if o.MinuteMetrics != nil { + if o.MinuteMetrics.Version == nil { + o.MinuteMetrics.Version = defaultVersion + } + + } + return generated.StorageServiceProperties{ - Cors: o.Cors, + CORS: o.CORS, DefaultServiceVersion: o.DefaultServiceVersion, DeleteRetentionPolicy: o.DeleteRetentionPolicy, HourMetrics: o.HourMetrics, @@ -179,6 +246,27 @@ func (o *SetPropertiesOptions) format() (generated.StorageServiceProperties, *ge // --------------------------------------------------------------------------------------------------------------------- +// GetSASURLOptions contains the optional parameters for the Client.GetSASURL method. 
+type GetSASURLOptions struct { + StartTime *time.Time +} + +func (o *GetSASURLOptions) format() time.Time { + if o == nil { + return time.Time{} + } + + var st time.Time + if o.StartTime != nil { + st = o.StartTime.UTC() + } else { + st = time.Time{} + } + return st +} + +// --------------------------------------------------------------------------------------------------------------------- + // GetStatisticsOptions provides set of options for Client.GetStatistics type GetStatisticsOptions struct { // placeholder for future options @@ -190,7 +278,7 @@ func (o *GetStatisticsOptions) format() *generated.ServiceClientGetStatisticsOpt // --------------------------------------------------------------------------------------------------------------------- -// FilterBlobsOptions provides set of options for Client.FindBlobsByTags +// FilterBlobsOptions provides set of options for Client.FindBlobsByTags. type FilterBlobsOptions struct { // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The // operation returns the NextMarker value within the response body if the listing @@ -204,8 +292,6 @@ type FilterBlobsOptions struct { // of the results. For this reason, it is possible that the service will // return fewer results than specified by maxresults, or than the default of 5000. MaxResults *int32 - // Filters the results to return only to return only blobs whose tags match the specified expression. - Where *string } func (o *FilterBlobsOptions) format() *generated.ServiceClientFilterBlobsOptions { @@ -215,6 +301,61 @@ func (o *FilterBlobsOptions) format() *generated.ServiceClientFilterBlobsOptions return &generated.ServiceClientFilterBlobsOptions{ Marker: o.Marker, Maxresults: o.MaxResults, - Where: o.Where, } } + +// --------------------------------------------------------------------------------------------------------------------- + +// BatchDeleteOptions contains the optional parameters for the BatchBuilder.Delete method. +type BatchDeleteOptions struct { + blob.DeleteOptions + VersionID *string + Snapshot *string +} + +func (o *BatchDeleteOptions) format() (*generated.BlobClientDeleteOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil + } + + basics := generated.BlobClientDeleteOptions{ + DeleteSnapshots: o.DeleteSnapshots, + DeleteType: o.BlobDeleteType, // None by default + Snapshot: o.Snapshot, + VersionID: o.VersionID, + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return &basics, leaseAccessConditions, modifiedAccessConditions +} + +// BatchSetTierOptions contains the optional parameters for the BatchBuilder.SetTier method. +type BatchSetTierOptions struct { + blob.SetTierOptions + VersionID *string + Snapshot *string +} + +func (o *BatchSetTierOptions) format() (*generated.BlobClientSetTierOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil + } + + basics := generated.BlobClientSetTierOptions{ + RehydratePriority: o.RehydratePriority, + Snapshot: o.Snapshot, + VersionID: o.VersionID, + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return &basics, leaseAccessConditions, modifiedAccessConditions +} + +// SubmitBatchOptions contains the optional parameters for the Client.SubmitBatch method. 
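GetSASURLOptions (optional start time) and the move of the tag filter out of FilterBlobsOptions change two call sites. A sketch of both, assuming a service.Client created with a shared key credential; the filter expression and durations are illustrative:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service"
)

func sasAndFilter(ctx context.Context, svc *service.Client) error {
	// Account SAS valid for 48 hours; StartTime is optional and now passed via the options struct.
	sasURL, err := svc.GetSASURL(
		sas.AccountResourceTypes{Container: true, Object: true},
		sas.AccountPermissions{Read: true, List: true},
		time.Now().Add(48*time.Hour),
		&service.GetSASURLOptions{StartTime: to.Ptr(time.Now())},
	)
	if err != nil {
		return err
	}
	fmt.Println(sasURL)

	// The tag filter is now the second parameter rather than FilterBlobsOptions.Where.
	resp, err := svc.FilterBlobs(ctx, "\"project\"='loki'", nil)
	if err != nil {
		return err
	}
	_ = resp // matching blobs are reported in the embedded FilterBlobSegment
	return nil
}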
+type SubmitBatchOptions struct {
+	// placeholder for future options
+}
+
+func (o *SubmitBatchOptions) format() *generated.ServiceClientSubmitBatchOptions {
+	return nil
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/responses.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/responses.go
index 788514cca3721..2dbf971654107 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/responses.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/responses.go
@@ -7,6 +7,7 @@
 package service
 
 import (
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
 	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
 )
 
@@ -25,6 +26,9 @@ type GetAccountInfoResponse = generated.ServiceClientGetAccountInfoResponse
 
 // ListContainersResponse contains the response from method Client.ListContainersSegment.
 type ListContainersResponse = generated.ServiceClientListContainersSegmentResponse
 
+// ListContainersSegmentResponse - An enumeration of containers
+type ListContainersSegmentResponse = generated.ListContainersSegmentResponse
+
 // GetPropertiesResponse contains the response from method Client.GetProperties.
 type GetPropertiesResponse = generated.ServiceClientGetPropertiesResponse
@@ -39,3 +43,21 @@ type FilterBlobsResponse = generated.ServiceClientFilterBlobsResponse
 
 // GetUserDelegationKeyResponse contains the response from method ServiceClient.GetUserDelegationKey.
 type GetUserDelegationKeyResponse = generated.ServiceClientGetUserDelegationKeyResponse
+
+// SubmitBatchResponse contains the response from method Client.SubmitBatch.
+type SubmitBatchResponse struct {
+	// Responses contains the responses of the sub-requests in the batch
+	Responses []*BatchResponseItem
+
+	// ContentType contains the information returned from the Content-Type header response.
+	ContentType *string
+
+	// RequestID contains the information returned from the x-ms-request-id header response.
+	RequestID *string
+
+	// Version contains the information returned from the x-ms-version header response.
+	Version *string
+}
+
+// BatchResponseItem contains the response for the individual sub-requests.
+type BatchResponseItem = exported.BatchResponseItem diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/test-resources.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/test-resources.json index b23e56abbb9c3..c6259f7ab02f3 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/test-resources.json +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/test-resources.json @@ -20,12 +20,13 @@ } }, "variables": { - "mgmtApiVersion": "2019-06-01", + "mgmtApiVersion": "2022-09-01", "authorizationApiVersion": "2018-09-01-preview", "blobDataContributorRoleId": "[concat('/subscriptions/', subscription().subscriptionId, '/providers/Microsoft.Authorization/roleDefinitions/ba92f5b4-2d11-453d-a403-e96b0029c9fe')]", "contributorRoleId": "[concat('/subscriptions/', subscription().subscriptionId, '/providers/Microsoft.Authorization/roleDefinitions/b24988ac-6180-42a0-ab88-20f7382dd24c')]", "blobDataOwnerRoleId": "[concat('/subscriptions/', subscription().subscriptionId, '/providers/Microsoft.Authorization/roleDefinitions/b7e6dc6d-f1e8-4753-8033-0f276bb0955b')]", "primaryAccountName": "[concat(parameters('baseName'), 'prim')]", + "immutableAccountName": "[concat(parameters('baseName'), 'imm')]", "primaryEncryptionScopeName": "encryptionScope", "primaryEncryptionScope": "[concat(parameters('baseName'), 'prim', concat('/', variables('primaryEncryptionScopeName')))]", "secondaryAccountName": "[concat(parameters('baseName'), 'sec')]", @@ -120,6 +121,45 @@ "[variables('primaryAccountName')]" ] }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('immutableAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "kind": "StorageV2", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot", + "immutableStorageWithVersioning": { + "enabled": true + } + } + }, + { + "type": "Microsoft.Storage/storageAccounts/blobServices", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[concat(variables('immutableAccountName'), '/default')]", + "properties": { + "isVersioningEnabled": true, + "lastAccessTimeTrackingPolicy": { + "enable": true, + "name": "AccessTimeTracking", + "trackingGranularityInDays": 1, + "blobType": [ + "blockBlob" + ] + } + }, + "dependsOn": [ + "[variables('immutableAccountName')]" + ] + }, { "type": "Microsoft.Storage/storageAccounts/encryptionScopes", "apiVersion": "[variables('mgmtApiVersion')]", @@ -220,7 +260,9 @@ "apiVersion": "[variables('mgmtApiVersion')]", "name": "[concat(variables('softDeleteAccountName'), '/default')]", "properties": { + "isVersioningEnabled": true, "deleteRetentionPolicy": { + "allowPermanentDelete": true, "enabled": true, "days": 1 }, @@ -424,11 +466,11 @@ "type": "string", "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('premiumAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob)]" }, - "DATALAKE_STORAGE_ACCOUNT_NAME": { + "DATALAKE_AZURE_STORAGE_ACCOUNT_NAME": { "type": "string", "value": "[variables('dataLakeAccountName')]" }, - "DATALAKE_STORAGE_ACCOUNT_KEY": { + "DATALAKE_AZURE_STORAGE_ACCOUNT_KEY": { "type": "string", "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('dataLakeAccountName')), variables('mgmtApiVersion')).keys[0].value]" }, @@ 
-448,6 +490,30 @@ "type": "string", "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('dataLakeAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]" }, + "IMMUTABLE_AZURE_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('immutableAccountName')]" + }, + "IMMUTABLE_AZURE_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('immutableAccountName')), variables('mgmtApiVersion')).keys[0].value]" + }, + "IMMUTABLE_AZURE_STORAGE_ACCOUNT_BLOB_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('immutableAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob)]" + }, + "IMMUTABLE_STORAGE_ACCOUNT_FILE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('immutableAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file)]" + }, + "IMMUTABLE_AZURE_STORAGE_ACCOUNT_QUEUE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('immutableAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue)]" + }, + "IMMUTABLE_AZURE_STORAGE_ACCOUNT_TABLE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('immutableAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]" + }, "SOFT_DELETE_AZURE_STORAGE_ACCOUNT_NAME": { "type": "string", "value": "[variables('softDeleteAccountName')]" diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go index 6612feb4bf88c..1841d146f5f53 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go @@ -59,6 +59,8 @@ added, it doesn't exist in real life. As such I've put a PEM decoder into here. // For details see https://aka.ms/msal-net-authenticationresult type AuthResult = base.AuthResult +type AuthenticationScheme = authority.AuthenticationScheme + type Account = shared.Account // CertFromPEM converts a PEM file (.pem or .key) for use with [NewCredFromCert]. The file @@ -454,6 +456,33 @@ func WithClaims(claims string) interface { } } +// WithAuthenticationScheme is an extensibility mechanism designed to be used only by Azure Arc for proof of possession access tokens. +func WithAuthenticationScheme(authnScheme AuthenticationScheme) interface { + AcquireSilentOption + AcquireByCredentialOption + options.CallOption +} { + return struct { + AcquireSilentOption + AcquireByCredentialOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *acquireTokenSilentOptions: + t.authnScheme = authnScheme + case *acquireTokenByCredentialOptions: + t.authnScheme = authnScheme + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + // WithTenantID specifies a tenant for a single authentication. It may be different than the tenant set in [New]. // This option is valid for any token acquisition method. 
func WithTenantID(tenantID string) interface { @@ -499,6 +528,7 @@ func WithTenantID(tenantID string) interface { type acquireTokenSilentOptions struct { account Account claims, tenantID string + authnScheme AuthenticationScheme } // AcquireSilentOption is implemented by options for AcquireTokenSilent @@ -549,6 +579,7 @@ func (cca Client) AcquireTokenSilent(ctx context.Context, scopes []string, opts Credential: cca.cred, IsAppCache: o.account.IsZero(), TenantID: o.tenantID, + AuthnScheme: o.authnScheme, } return cca.base.AcquireTokenSilent(ctx, silentParameters) @@ -614,6 +645,7 @@ func (cca Client) AcquireTokenByAuthCode(ctx context.Context, code string, redir // acquireTokenByCredentialOptions contains optional configuration for AcquireTokenByCredential type acquireTokenByCredentialOptions struct { claims, tenantID string + authnScheme AuthenticationScheme } // AcquireByCredentialOption is implemented by options for AcquireTokenByCredential @@ -637,7 +669,9 @@ func (cca Client) AcquireTokenByCredential(ctx context.Context, scopes []string, authParams.Scopes = scopes authParams.AuthorizationType = authority.ATClientCredentials authParams.Claims = o.claims - + if o.authnScheme != nil { + authParams.AuthnScheme = o.authnScheme + } token, err := cca.base.Token.Credential(ctx, authParams, cca.cred) if err != nil { return AuthResult{}, err diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go index 5f68384f68bc9..09a0d92f5205b 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go @@ -54,6 +54,7 @@ type AcquireTokenSilentParameters struct { UserAssertion string AuthorizationType authority.AuthorizeType Claims string + AuthnScheme authority.AuthenticationScheme } // AcquireTokenAuthCodeParameters contains the parameters required to acquire an access token using the auth code flow. 
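WithTenantID and the new authnScheme plumbing are consumed through the ordinary confidential-client flow. A hedged sketch with placeholder authority, client, and secret values; the authority-first confidential.New signature matches recent MSAL Go releases and should be checked against the vendored version:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential"
)

func main() {
	// Placeholders throughout; adjust the constructor if the vendored API differs.
	cred, err := confidential.NewCredFromSecret("<client-secret>")
	if err != nil {
		log.Fatal(err)
	}
	client, err := confidential.New("https://login.microsoftonline.com/<tenant-id>", "<client-id>", cred)
	if err != nil {
		log.Fatal(err)
	}

	// WithTenantID overrides the tenant for this call only; WithAuthenticationScheme
	// (added above) could be passed here as well when a proof-of-possession scheme is needed.
	result, err := client.AcquireTokenByCredential(
		context.TODO(),
		[]string{"https://storage.azure.com/.default"},
		confidential.WithTenantID("<other-tenant-id>"),
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(result.AccessToken != "")
}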
@@ -289,6 +290,9 @@ func (b Client) AcquireTokenSilent(ctx context.Context, silent AcquireTokenSilen authParams.AuthorizationType = silent.AuthorizationType authParams.Claims = silent.Claims authParams.UserAssertion = silent.UserAssertion + if silent.AuthnScheme != nil { + authParams.AuthnScheme = silent.AuthnScheme + } m := b.pmanager if authParams.AuthorizationType != authority.ATOnBehalfOf { @@ -313,6 +317,7 @@ func (b Client) AcquireTokenSilent(ctx context.Context, silent AcquireTokenSilen if silent.Claims == "" { ar, err = AuthResultFromStorage(storageTokenResponse) if err == nil { + ar.AccessToken, err = authParams.AuthnScheme.FormatAccessToken(ar.AccessToken) return ar, err } } @@ -417,6 +422,11 @@ func (b Client) AuthResultFromToken(ctx context.Context, authParams authority.Au if err == nil && b.cacheAccessor != nil { err = b.cacheAccessor.Export(ctx, b.manager, cache.ExportHints{PartitionKey: key}) } + if err != nil { + return AuthResult{}, err + } + + ar.AccessToken, err = authParams.AuthnScheme.FormatAccessToken(ar.AccessToken) return ar, err } diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go index 548c2faebf962..f9be90276dad6 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go @@ -12,6 +12,7 @@ import ( internalTime "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared" ) @@ -75,12 +76,14 @@ type AccessToken struct { ExtendedExpiresOn internalTime.Unix `json:"extended_expires_on,omitempty"` CachedAt internalTime.Unix `json:"cached_at,omitempty"` UserAssertionHash string `json:"user_assertion_hash,omitempty"` + TokenType string `json:"token_type,omitempty"` + AuthnSchemeKeyID string `json:"keyid,omitempty"` AdditionalFields map[string]interface{} } // NewAccessToken is the constructor for AccessToken. -func NewAccessToken(homeID, env, realm, clientID string, cachedAt, expiresOn, extendedExpiresOn time.Time, scopes, token string) AccessToken { +func NewAccessToken(homeID, env, realm, clientID string, cachedAt, expiresOn, extendedExpiresOn time.Time, scopes, token, tokenType, authnSchemeKeyID string) AccessToken { return AccessToken{ HomeAccountID: homeID, Environment: env, @@ -92,15 +95,23 @@ func NewAccessToken(homeID, env, realm, clientID string, cachedAt, expiresOn, ex CachedAt: internalTime.Unix{T: cachedAt.UTC()}, ExpiresOn: internalTime.Unix{T: expiresOn.UTC()}, ExtendedExpiresOn: internalTime.Unix{T: extendedExpiresOn.UTC()}, + TokenType: tokenType, + AuthnSchemeKeyID: authnSchemeKeyID, } } // Key outputs the key that can be used to uniquely look up this entry in a map. func (a AccessToken) Key() string { - return strings.Join( + key := strings.Join( []string{a.HomeAccountID, a.Environment, a.CredentialType, a.ClientID, a.Realm, a.Scopes}, shared.CacheKeySeparator, ) + // add token type to key for new access tokens types. 
skip for bearer token type to + // preserve fwd and back compat between a common cache and msal clients + if !strings.EqualFold(a.TokenType, authority.AccessTokenTypeBearer) { + key = strings.Join([]string{key, a.TokenType}, shared.CacheKeySeparator) + } + return strings.ToLower(key) } // FakeValidate enables tests to fake access token validation @@ -167,10 +178,11 @@ func NewIDToken(homeID, env, realm, clientID, idToken string) IDToken { // Key outputs the key that can be used to uniquely look up this entry in a map. func (id IDToken) Key() string { - return strings.Join( + key := strings.Join( []string{id.HomeAccountID, id.Environment, id.CredentialType, id.ClientID, id.Realm}, shared.CacheKeySeparator, ) + return strings.ToLower(key) } // AppMetaData is the JSON representation of application metadata for encoding to storage. @@ -193,8 +205,9 @@ func NewAppMetaData(familyID, clientID, environment string) AppMetaData { // Key outputs the key that can be used to uniquely look up this entry in a map. func (a AppMetaData) Key() string { - return strings.Join( + key := strings.Join( []string{"AppMetaData", a.Environment, a.ClientID}, shared.CacheKeySeparator, ) + return strings.ToLower(key) } diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go index 87d7d797b3e1a..c0931833064fc 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go @@ -41,6 +41,8 @@ func (m *PartitionedManager) Read(ctx context.Context, authParameters authority. realm := authParameters.AuthorityInfo.Tenant clientID := authParameters.ClientID scopes := authParameters.Scopes + authnSchemeKeyID := authParameters.AuthnScheme.KeyID() + tokenType := authParameters.AuthnScheme.AccessTokenType() // fetch metadata if instanceDiscovery is enabled aliases := []string{authParameters.AuthorityInfo.Host} @@ -57,7 +59,7 @@ func (m *PartitionedManager) Read(ctx context.Context, authParameters authority. // errors returned by read* methods indicate a cache miss and are therefore non-fatal. We continue populating // TokenResponse fields so that e.g. lack of an ID token doesn't prevent the caller from receiving a refresh token. - accessToken, err := m.readAccessToken(aliases, realm, clientID, userAssertionHash, scopes, partitionKeyFromRequest) + accessToken, err := m.readAccessToken(aliases, realm, clientID, userAssertionHash, scopes, partitionKeyFromRequest, tokenType, authnSchemeKeyID) if err == nil { tr.AccessToken = accessToken } @@ -84,7 +86,7 @@ func (m *PartitionedManager) Read(ctx context.Context, authParameters authority. // Write writes a token response to the cache and returns the account information the token is stored with. 
func (m *PartitionedManager) Write(authParameters authority.AuthParams, tokenResponse accesstokens.TokenResponse) (shared.Account, error) { - authParameters.HomeAccountID = tokenResponse.ClientInfo.HomeAccountID() + authParameters.HomeAccountID = tokenResponse.HomeAccountID() homeAccountID := authParameters.HomeAccountID environment := authParameters.AuthorityInfo.Host realm := authParameters.AuthorityInfo.Tenant @@ -92,7 +94,7 @@ func (m *PartitionedManager) Write(authParameters authority.AuthParams, tokenRes target := strings.Join(tokenResponse.GrantedScopes.Slice, scopeSeparator) userAssertionHash := authParameters.AssertionHash() cachedAt := time.Now() - + authnSchemeKeyID := authParameters.AuthnScheme.KeyID() var account shared.Account if len(tokenResponse.RefreshToken) > 0 { @@ -116,6 +118,8 @@ func (m *PartitionedManager) Write(authParameters authority.AuthParams, tokenRes tokenResponse.ExtExpiresOn.T, target, tokenResponse.AccessToken, + tokenResponse.TokenType, + authnSchemeKeyID, ) if authParameters.AuthorizationType == authority.ATOnBehalfOf { accessToken.UserAssertionHash = userAssertionHash // get Hash method on this @@ -215,7 +219,7 @@ func (m *PartitionedManager) aadMetadata(ctx context.Context, authorityInfo auth return m.aadCache[authorityInfo.Host], nil } -func (m *PartitionedManager) readAccessToken(envAliases []string, realm, clientID, userAssertionHash string, scopes []string, partitionKey string) (AccessToken, error) { +func (m *PartitionedManager) readAccessToken(envAliases []string, realm, clientID, userAssertionHash string, scopes []string, partitionKey, tokenType, authnSchemeKeyID string) (AccessToken, error) { m.contractMu.RLock() defer m.contractMu.RUnlock() if accessTokens, ok := m.contract.AccessTokensPartition[partitionKey]; ok { @@ -224,9 +228,11 @@ func (m *PartitionedManager) readAccessToken(envAliases []string, realm, clientI // an issue, however if it does become a problem then we know where to look. for _, at := range accessTokens { if at.Realm == realm && at.ClientID == clientID && at.UserAssertionHash == userAssertionHash { - if checkAlias(at.Environment, envAliases) { - if isMatchingScopes(scopes, at.Scopes) { - return at, nil + if at.TokenType == tokenType && at.AuthnSchemeKeyID == authnSchemeKeyID { + if checkAlias(at.Environment, envAliases) { + if isMatchingScopes(scopes, at.Scopes) { + return at, nil + } } } } diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go index add7519252dd9..2221e60c437f9 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go @@ -82,6 +82,39 @@ func isMatchingScopes(scopesOne []string, scopesTwo string) bool { return scopeCounter == len(scopesOne) } +// needsUpgrade returns true if the given key follows the v1.0 schema i.e., +// it contains an uppercase character (v1.1+ keys are all lowercase) +func needsUpgrade(key string) bool { + for _, r := range key { + if 'A' <= r && r <= 'Z' { + return true + } + } + return false +} + +// upgrade a v1.0 cache item by adding a v1.1+ item having the same value and deleting +// the v1.0 item. Callers must hold an exclusive lock on m. 
+func upgrade[T any](m map[string]T, k string) T { + v1_1Key := strings.ToLower(k) + v, ok := m[k] + if !ok { + // another goroutine did the upgrade while this one was waiting for the write lock + return m[v1_1Key] + } + if v2, ok := m[v1_1Key]; ok { + // cache has an equivalent v1.1+ item, which we prefer because we know it was added + // by a newer version of the module and is therefore more likely to remain valid. + // The v1.0 item may have expired because only v1.0 or earlier would update it. + v = v2 + } else { + // add an equivalent item according to the v1.1 schema + m[v1_1Key] = v + } + delete(m, k) + return v +} + // Read reads a storage token from the cache if it exists. func (m *Manager) Read(ctx context.Context, authParameters authority.AuthParams) (TokenResponse, error) { tr := TokenResponse{} @@ -89,6 +122,8 @@ func (m *Manager) Read(ctx context.Context, authParameters authority.AuthParams) realm := authParameters.AuthorityInfo.Tenant clientID := authParameters.ClientID scopes := authParameters.Scopes + authnSchemeKeyID := authParameters.AuthnScheme.KeyID() + tokenType := authParameters.AuthnScheme.AccessTokenType() // fetch metadata if instanceDiscovery is enabled aliases := []string{authParameters.AuthorityInfo.Host} @@ -100,7 +135,7 @@ func (m *Manager) Read(ctx context.Context, authParameters authority.AuthParams) aliases = metadata.Aliases } - accessToken := m.readAccessToken(homeAccountID, aliases, realm, clientID, scopes) + accessToken := m.readAccessToken(homeAccountID, aliases, realm, clientID, scopes, tokenType, authnSchemeKeyID) tr.AccessToken = accessToken if homeAccountID == "" { @@ -134,13 +169,13 @@ const scopeSeparator = " " // Write writes a token response to the cache and returns the account information the token is stored with. func (m *Manager) Write(authParameters authority.AuthParams, tokenResponse accesstokens.TokenResponse) (shared.Account, error) { - authParameters.HomeAccountID = tokenResponse.ClientInfo.HomeAccountID() - homeAccountID := authParameters.HomeAccountID + homeAccountID := tokenResponse.HomeAccountID() environment := authParameters.AuthorityInfo.Host realm := authParameters.AuthorityInfo.Tenant clientID := authParameters.ClientID target := strings.Join(tokenResponse.GrantedScopes.Slice, scopeSeparator) cachedAt := time.Now() + authnSchemeKeyID := authParameters.AuthnScheme.KeyID() var account shared.Account @@ -162,6 +197,8 @@ func (m *Manager) Write(authParameters authority.AuthParams, tokenResponse acces tokenResponse.ExtExpiresOn.T, target, tokenResponse.AccessToken, + tokenResponse.TokenType, + authnSchemeKeyID, ) // Since we have a valid access token, cache it before moving on. @@ -249,21 +286,27 @@ func (m *Manager) aadMetadata(ctx context.Context, authorityInfo authority.Info) return m.aadCache[authorityInfo.Host], nil } -func (m *Manager) readAccessToken(homeID string, envAliases []string, realm, clientID string, scopes []string) AccessToken { +func (m *Manager) readAccessToken(homeID string, envAliases []string, realm, clientID string, scopes []string, tokenType, authnSchemeKeyID string) AccessToken { m.contractMu.RLock() - defer m.contractMu.RUnlock() // TODO: linear search (over a map no less) is slow for a large number (thousands) of tokens. // this shows up as the dominating node in a profile. for real-world scenarios this likely isn't // an issue, however if it does become a problem then we know where to look. 
- for _, at := range m.contract.AccessTokens { + for k, at := range m.contract.AccessTokens { if at.HomeAccountID == homeID && at.Realm == realm && at.ClientID == clientID { - if checkAlias(at.Environment, envAliases) { - if isMatchingScopes(scopes, at.Scopes) { + if (strings.EqualFold(at.TokenType, tokenType) && at.AuthnSchemeKeyID == authnSchemeKeyID) || (at.TokenType == "" && (tokenType == "" || tokenType == "Bearer")) { + if checkAlias(at.Environment, envAliases) && isMatchingScopes(scopes, at.Scopes) { + m.contractMu.RUnlock() + if needsUpgrade(k) { + m.contractMu.Lock() + defer m.contractMu.Unlock() + at = upgrade(m.contract.AccessTokens, k) + } return at } } } } + m.contractMu.RUnlock() return AccessToken{} } @@ -304,15 +347,21 @@ func (m *Manager) readRefreshToken(homeID string, envAliases []string, familyID, // If app is part of the family or if we DO NOT KNOW if it's part of the family, search by family ID, then by client_id (we will know if an app is part of the family after the first token response). // https://github.com/AzureAD/microsoft-authentication-library-for-dotnet/blob/311fe8b16e7c293462806f397e189a6aa1159769/src/client/Microsoft.Identity.Client/Internal/Requests/Silent/CacheSilentStrategy.cs#L95 m.contractMu.RLock() - defer m.contractMu.RUnlock() for _, matcher := range matchers { - for _, rt := range m.contract.RefreshTokens { + for k, rt := range m.contract.RefreshTokens { if matcher(rt) { + m.contractMu.RUnlock() + if needsUpgrade(k) { + m.contractMu.Lock() + defer m.contractMu.Unlock() + rt = upgrade(m.contract.RefreshTokens, k) + } return rt, nil } } } + m.contractMu.RUnlock() return accesstokens.RefreshToken{}, fmt.Errorf("refresh token not found") } @@ -334,14 +383,20 @@ func (m *Manager) writeRefreshToken(refreshToken accesstokens.RefreshToken) erro func (m *Manager) readIDToken(homeID string, envAliases []string, realm, clientID string) (IDToken, error) { m.contractMu.RLock() - defer m.contractMu.RUnlock() - for _, idt := range m.contract.IDTokens { + for k, idt := range m.contract.IDTokens { if idt.HomeAccountID == homeID && idt.Realm == realm && idt.ClientID == clientID { if checkAlias(idt.Environment, envAliases) { + m.contractMu.RUnlock() + if needsUpgrade(k) { + m.contractMu.Lock() + defer m.contractMu.Unlock() + idt = upgrade(m.contract.IDTokens, k) + } return idt, nil } } } + m.contractMu.RUnlock() return IDToken{}, fmt.Errorf("token not found") } @@ -380,7 +435,6 @@ func (m *Manager) Account(homeAccountID string) shared.Account { func (m *Manager) readAccount(homeAccountID string, envAliases []string, realm string) (shared.Account, error) { m.contractMu.RLock() - defer m.contractMu.RUnlock() // You might ask why, if cache.Accounts is a map, we would loop through all of these instead of using a key. // We only use a map because the storage contract shared between all language implementations says use a map. @@ -388,11 +442,18 @@ func (m *Manager) readAccount(homeAccountID string, envAliases []string, realm s // a match in multiple envs (envAlias). That means we either need to hash each possible keyand do the lookup // or just statically check. Since the design is to have a storage.Manager per user, the amount of keys stored // is really low (say 2). Each hash is more expensive than the entire iteration. 
- for _, acc := range m.contract.Accounts { + for k, acc := range m.contract.Accounts { if acc.HomeAccountID == homeAccountID && checkAlias(acc.Environment, envAliases) && acc.Realm == realm { + m.contractMu.RUnlock() + if needsUpgrade(k) { + m.contractMu.Lock() + defer m.contractMu.Unlock() + acc = upgrade(m.contract.Accounts, k) + } return acc, nil } } + m.contractMu.RUnlock() return shared.Account{}, fmt.Errorf("account not found") } @@ -406,13 +467,18 @@ func (m *Manager) writeAccount(account shared.Account) error { func (m *Manager) readAppMetaData(envAliases []string, clientID string) (AppMetaData, error) { m.contractMu.RLock() - defer m.contractMu.RUnlock() - - for _, app := range m.contract.AppMetaData { + for k, app := range m.contract.AppMetaData { if checkAlias(app.Environment, envAliases) && app.ClientID == clientID { + m.contractMu.RUnlock() + if needsUpgrade(k) { + m.contractMu.Lock() + defer m.contractMu.Unlock() + app = upgrade(m.contract.AppMetaData, k) + } return app, nil } } + m.contractMu.RUnlock() return AppMetaData{}, fmt.Errorf("not found") } diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/test_serialized_cache.json b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/test_serialized_cache.json deleted file mode 100644 index 1d8181924d145..0000000000000 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/test_serialized_cache.json +++ /dev/null @@ -1,56 +0,0 @@ -{ - "Account": { - "uid.utid-login.windows.net-contoso": { - "username": "John Doe", - "local_account_id": "object1234", - "realm": "contoso", - "environment": "login.windows.net", - "home_account_id": "uid.utid", - "authority_type": "MSSTS" - } - }, - "RefreshToken": { - "uid.utid-login.windows.net-refreshtoken-my_client_id--s2 s1 s3": { - "target": "s2 s1 s3", - "environment": "login.windows.net", - "credential_type": "RefreshToken", - "secret": "a refresh token", - "client_id": "my_client_id", - "home_account_id": "uid.utid" - } - }, - "AccessToken": { - "an-entry": { - "foo": "bar" - }, - "uid.utid-login.windows.net-accesstoken-my_client_id-contoso-s2 s1 s3": { - "environment": "login.windows.net", - "credential_type": "AccessToken", - "secret": "an access token", - "realm": "contoso", - "target": "s2 s1 s3", - "client_id": "my_client_id", - "cached_at": "1000", - "home_account_id": "uid.utid", - "extended_expires_on": "4600", - "expires_on": "4600" - } - }, - "IdToken": { - "uid.utid-login.windows.net-idtoken-my_client_id-contoso-": { - "realm": "contoso", - "environment": "login.windows.net", - "credential_type": "IdToken", - "secret": "header.eyJvaWQiOiAib2JqZWN0MTIzNCIsICJwcmVmZXJyZWRfdXNlcm5hbWUiOiAiSm9obiBEb2UiLCAic3ViIjogInN1YiJ9.signature", - "client_id": "my_client_id", - "home_account_id": "uid.utid" - } - }, - "unknownEntity": {"field1":"1","field2":"whats"}, - "AppMetadata": { - "AppMetadata-login.windows.net-my_client_id": { - "environment": "login.windows.net", - "client_id": "my_client_id" - } - } - } \ No newline at end of file diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go index ebd86e2baf9ce..ef8d908a444f2 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go +++ 
b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go @@ -119,6 +119,7 @@ func (t *Client) Credential(ctx context.Context, authParams authority.AuthParams return accesstokens.TokenResponse{}, err } return accesstokens.TokenResponse{ + TokenType: authParams.AuthnScheme.AccessTokenType(), AccessToken: tr.AccessToken, ExpiresOn: internalTime.DurationTime{ T: now.Add(time.Duration(tr.ExpiresInSeconds) * time.Second), diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go index fa6bb61c8ef23..a7b7b0742d876 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go @@ -30,7 +30,7 @@ import ( "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/grant" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust" - "github.com/golang-jwt/jwt/v4" + "github.com/golang-jwt/jwt/v5" "github.com/google/uuid" ) @@ -380,6 +380,12 @@ func (c Client) FromSamlGrant(ctx context.Context, authParameters authority.Auth func (c Client) doTokenResp(ctx context.Context, authParams authority.AuthParams, qv url.Values) (TokenResponse, error) { resp := TokenResponse{} + if authParams.AuthnScheme != nil { + trParams := authParams.AuthnScheme.TokenRequestParams() + for k, v := range trParams { + qv.Set(k, v) + } + } err := c.Comm.URLFormCall(ctx, authParams.Endpoints.TokenEndpoint, qv, &resp) if err != nil { return resp, err diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go index b3892bf3f3245..3107b45c11365 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go @@ -146,17 +146,6 @@ func (c *ClientInfo) UnmarshalJSON(b []byte) error { return nil } -// HomeAccountID creates the home account ID. -func (c ClientInfo) HomeAccountID() string { - if c.UID == "" { - return "" - } else if c.UTID == "" { - return fmt.Sprintf("%s.%s", c.UID, c.UID) - } else { - return fmt.Sprintf("%s.%s", c.UID, c.UTID) - } -} - // Scopes represents scopes in a TokenResponse. type Scopes struct { Slice []string @@ -179,6 +168,7 @@ type TokenResponse struct { AccessToken string `json:"access_token"` RefreshToken string `json:"refresh_token"` + TokenType string `json:"token_type"` FamilyID string `json:"foci"` IDToken IDToken `json:"id_token"` @@ -206,6 +196,19 @@ func (tr *TokenResponse) ComputeScope(authParams authority.AuthParams) { tr.scopesComputed = true } +// HomeAccountID uniquely identifies the authenticated account, if any. It's "" when the token is an app token. 
+func (tr *TokenResponse) HomeAccountID() string { + id := tr.IDToken.Subject + if uid := tr.ClientInfo.UID; uid != "" { + utid := tr.ClientInfo.UTID + if utid == "" { + utid = uid + } + id = fmt.Sprintf("%s.%s", uid, utid) + } + return id +} + // Validate validates the TokenResponse has basic valid values. It must be called // after ComputeScopes() is called. func (tr *TokenResponse) Validate() error { @@ -231,7 +234,7 @@ func (tr *TokenResponse) CacheKey(authParams authority.AuthParams) string { return authParams.AppKey() } if authParams.IsConfidentialClient || authParams.AuthorizationType == authority.ATRefreshToken { - return tr.ClientInfo.HomeAccountID() + return tr.HomeAccountID() } return "" } @@ -294,10 +297,11 @@ func (rt RefreshToken) Key() string { fourth = rt.ClientID } - return strings.Join( + key := strings.Join( []string{rt.HomeAccountID, rt.Environment, rt.CredentialType, fourth}, shared.CacheKeySeparator, ) + return strings.ToLower(key) } func (rt RefreshToken) GetSecret() string { diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go index 7b2ccb4f5d20b..9d60734f88e24 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go @@ -29,6 +29,7 @@ const ( defaultAPIVersion = "2021-10-01" imdsEndpoint = "http://169.254.169.254/metadata/instance/compute/location?format=text&api-version=" + defaultAPIVersion autoDetectRegion = "TryAutoDetect" + AccessTokenTypeBearer = "Bearer" ) // These are various hosts that host AAD Instance discovery endpoints. @@ -138,6 +139,39 @@ const ( ADFS = "ADFS" ) +// AuthenticationScheme is an extensibility mechanism designed to be used only by Azure Arc for proof of possession access tokens. +type AuthenticationScheme interface { + // Extra parameters that are added to the request to the /token endpoint. + TokenRequestParams() map[string]string + // Key ID of the public / private key pair used by the encryption algorithm, if any. + // Tokens obtained by authentication schemes that use this are bound to the KeyId, i.e. + // if a different kid is presented, the access token cannot be used. + KeyID() string + // Creates the access token that goes into an Authorization HTTP header. + FormatAccessToken(accessToken string) (string, error) + //Expected to match the token_type parameter returned by ESTS. Used to disambiguate + // between ATs of different types (e.g. Bearer and PoP) when loading from cache etc. + AccessTokenType() string +} + +// default authn scheme realizing AuthenticationScheme for "Bearer" tokens +type BearerAuthenticationScheme struct{} + +var bearerAuthnScheme BearerAuthenticationScheme + +func (ba *BearerAuthenticationScheme) TokenRequestParams() map[string]string { + return nil +} +func (ba *BearerAuthenticationScheme) KeyID() string { + return "" +} +func (ba *BearerAuthenticationScheme) FormatAccessToken(accessToken string) (string, error) { + return accessToken, nil +} +func (ba *BearerAuthenticationScheme) AccessTokenType() string { + return AccessTokenTypeBearer +} + // AuthParams represents the parameters used for authorization for token acquisition. 
type AuthParams struct { AuthorityInfo Info @@ -180,6 +214,8 @@ type AuthParams struct { LoginHint string // DomainHint is a directive that can be used to accelerate the user to their federated IdP sign-in page DomainHint string + // AuthnScheme is an optional scheme for formatting access tokens + AuthnScheme AuthenticationScheme } // NewAuthParams creates an authorization parameters object. @@ -188,6 +224,7 @@ func NewAuthParams(clientID string, authorityInfo Info) AuthParams { ClientID: clientID, AuthorityInfo: authorityInfo, CorrelationID: uuid.New().String(), + AuthnScheme: &bearerAuthnScheme, } } diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared/shared.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared/shared.go index f7e12a71bf318..d8ab713560c9f 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared/shared.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared/shared.go @@ -46,7 +46,8 @@ func NewAccount(homeAccountID, env, realm, localAccountID, authorityType, userna // Key creates the key for storing accounts in the cache. func (acc Account) Key() string { - return strings.Join([]string{acc.HomeAccountID, acc.Environment, acc.Realm}, CacheKeySeparator) + key := strings.Join([]string{acc.HomeAccountID, acc.Environment, acc.Realm}, CacheKeySeparator) + return strings.ToLower(key) } // IsZero checks the zero value of account. diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go index b76c0c56962e1..eb16b405c4be8 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go @@ -5,4 +5,4 @@ package version // Version is the version of this client package that is communicated to the server. -const Version = "1.0.0" +const Version = "1.2.0" diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go index cce05277e800e..e346ff3dffd05 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go @@ -24,8 +24,10 @@ import ( "crypto/rand" "crypto/sha256" "encoding/base64" + "errors" "fmt" "net/url" + "reflect" "strconv" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache" @@ -45,8 +47,12 @@ import ( // For details see https://aka.ms/msal-net-authenticationresult type AuthResult = base.AuthResult +type AuthenticationScheme = authority.AuthenticationScheme + type Account = shared.Account +var errNoAccount = errors.New("no account was specified with public.WithSilentAccount(), or the specified account is invalid") + // clientOptions configures the Client's behavior. type clientOptions struct { accessor cache.ExportReplace @@ -207,6 +213,33 @@ func WithClaims(claims string) interface { } } +// WithAuthenticationScheme is an extensibility mechanism designed to be used only by Azure Arc for proof of possession access tokens. 
+func WithAuthenticationScheme(authnScheme AuthenticationScheme) interface { + AcquireSilentOption + AcquireInteractiveOption + options.CallOption +} { + return struct { + AcquireSilentOption + AcquireInteractiveOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *acquireTokenSilentOptions: + t.authnScheme = authnScheme + case *interactiveAuthOptions: + t.authnScheme = authnScheme + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + // WithTenantID specifies a tenant for a single authentication. It may be different than the tenant set in [New] by [WithAuthority]. // This option is valid for any token acquisition method. func WithTenantID(tenantID string) interface { @@ -256,6 +289,7 @@ func WithTenantID(tenantID string) interface { type acquireTokenSilentOptions struct { account Account claims, tenantID string + authnScheme AuthenticationScheme } // AcquireSilentOption is implemented by options for AcquireTokenSilent @@ -294,6 +328,10 @@ func (pca Client) AcquireTokenSilent(ctx context.Context, scopes []string, opts if err := options.ApplyOptions(&o, opts); err != nil { return AuthResult{}, err } + // an account is required to find user tokens in the cache + if reflect.ValueOf(o.account).IsZero() { + return AuthResult{}, errNoAccount + } silentParameters := base.AcquireTokenSilentParameters{ Scopes: scopes, @@ -302,6 +340,7 @@ func (pca Client) AcquireTokenSilent(ctx context.Context, scopes []string, opts RequestType: accesstokens.ATPublic, IsAppCache: false, TenantID: o.tenantID, + AuthnScheme: o.authnScheme, } return pca.base.AcquireTokenSilent(ctx, silentParameters) @@ -473,6 +512,8 @@ func (pca Client) RemoveAccount(ctx context.Context, account Account) error { // interactiveAuthOptions contains the optional parameters used to acquire an access token for interactive auth code flow. type interactiveAuthOptions struct { claims, domainHint, loginHint, redirectURI, tenantID string + openURL func(url string) error + authnScheme AuthenticationScheme } // AcquireInteractiveOption is implemented by options for AcquireTokenInteractive @@ -558,10 +599,33 @@ func WithRedirectURI(redirectURI string) interface { } } +// WithOpenURL allows you to provide a function to open the browser to complete the interactive login, instead of launching the system default browser. +func WithOpenURL(openURL func(url string) error) interface { + AcquireInteractiveOption + options.CallOption +} { + return struct { + AcquireInteractiveOption + options.CallOption + }{ + CallOption: options.NewCallOption( + func(a any) error { + switch t := a.(type) { + case *interactiveAuthOptions: + t.openURL = openURL + default: + return fmt.Errorf("unexpected options type %T", a) + } + return nil + }, + ), + } +} + // AcquireTokenInteractive acquires a security token from the authority using the default web browser to select the account. 
// https://docs.microsoft.com/en-us/azure/active-directory/develop/msal-authentication-flows#interactive-and-non-interactive-authentication // -// Options: [WithDomainHint], [WithLoginHint], [WithRedirectURI], [WithTenantID] +// Options: [WithDomainHint], [WithLoginHint], [WithOpenURL], [WithRedirectURI], [WithTenantID] func (pca Client) AcquireTokenInteractive(ctx context.Context, scopes []string, opts ...AcquireInteractiveOption) (AuthResult, error) { o := interactiveAuthOptions{} if err := options.ApplyOptions(&o, opts); err != nil { @@ -580,6 +644,9 @@ func (pca Client) AcquireTokenInteractive(ctx context.Context, scopes []string, return AuthResult{}, err } } + if o.openURL == nil { + o.openURL = browser.OpenURL + } authParams, err := pca.base.AuthParams.WithTenant(o.tenantID) if err != nil { return AuthResult{}, err @@ -593,7 +660,10 @@ func (pca Client) AcquireTokenInteractive(ctx context.Context, scopes []string, authParams.DomainHint = o.domainHint authParams.State = uuid.New().String() authParams.Prompt = "select_account" - res, err := pca.browserLogin(ctx, redirectURL, authParams) + if o.authnScheme != nil { + authParams.AuthnScheme = o.authnScheme + } + res, err := pca.browserLogin(ctx, redirectURL, authParams, o.openURL) if err != nil { return AuthResult{}, err } @@ -617,11 +687,6 @@ type interactiveAuthResult struct { redirectURI string } -// provides a test hook to simulate opening a browser -var browserOpenURL = func(authURL string) error { - return browser.OpenURL(authURL) -} - // parses the port number from the provided URL. // returns 0 if nil or no port is specified. func parsePort(u *url.URL) (int, error) { @@ -635,8 +700,8 @@ func parsePort(u *url.URL) (int, error) { return strconv.Atoi(p) } -// browserLogin launches the system browser for interactive login -func (pca Client) browserLogin(ctx context.Context, redirectURI *url.URL, params authority.AuthParams) (interactiveAuthResult, error) { +// browserLogin calls openURL and waits for a user to log in +func (pca Client) browserLogin(ctx context.Context, redirectURI *url.URL, params authority.AuthParams, openURL func(string) error) (interactiveAuthResult, error) { // start local redirect server so login can call us back port, err := parsePort(redirectURI) if err != nil { @@ -653,7 +718,7 @@ func (pca Client) browserLogin(ctx context.Context, redirectURI *url.URL, params return interactiveAuthResult{}, err } // open browser window so user can select credentials - if err := browserOpenURL(authURL); err != nil { + if err := openURL(authURL); err != nil { return interactiveAuthResult{}, err } // now wait until the logic calls us back diff --git a/vendor/github.com/golang-jwt/jwt/v5/.gitignore b/vendor/github.com/golang-jwt/jwt/v5/.gitignore new file mode 100644 index 0000000000000..09573e0169c21 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v5/.gitignore @@ -0,0 +1,4 @@ +.DS_Store +bin +.idea/ + diff --git a/vendor/github.com/golang-jwt/jwt/v5/LICENSE b/vendor/github.com/golang-jwt/jwt/v5/LICENSE new file mode 100644 index 0000000000000..35dbc252041ea --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v5/LICENSE @@ -0,0 +1,9 @@ +Copyright (c) 2012 Dave Grijalva +Copyright (c) 2021 golang-jwt maintainers + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies 
of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + diff --git a/vendor/github.com/golang-jwt/jwt/v5/MIGRATION_GUIDE.md b/vendor/github.com/golang-jwt/jwt/v5/MIGRATION_GUIDE.md new file mode 100644 index 0000000000000..ff9c57e1d8442 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v5/MIGRATION_GUIDE.md @@ -0,0 +1,195 @@ +# Migration Guide (v5.0.0) + +Version `v5` contains a major rework of core functionalities in the `jwt-go` +library. This includes support for several validation options as well as a +re-design of the `Claims` interface. Lastly, we reworked how errors work under +the hood, which should provide a better overall developer experience. + +Starting from [v5.0.0](https://github.com/golang-jwt/jwt/releases/tag/v5.0.0), +the import path will be: + + "github.com/golang-jwt/jwt/v5" + +For most users, changing the import path *should* suffice. However, since we +intentionally changed and cleaned some of the public API, existing programs +might need to be updated. The following sections describe significant changes +and corresponding updates for existing programs. + +## Parsing and Validation Options + +Under the hood, a new `Validator` struct takes care of validating the claims. A +long awaited feature has been the option to fine-tune the validation of tokens. +This is now possible with several `ParserOption` functions that can be appended +to most `Parse` functions, such as `ParseWithClaims`. The most important options +and changes are: + * Added `WithLeeway` to support specifying the leeway that is allowed when + validating time-based claims, such as `exp` or `nbf`. + * Changed default behavior to not check the `iat` claim. Usage of this claim + is OPTIONAL according to the JWT RFC. The claim itself is also purely + informational according to the RFC, so a strict validation failure is not + recommended. If you want to check for sensible values in these claims, + please use the `WithIssuedAt` parser option. + * Added `WithAudience`, `WithSubject` and `WithIssuer` to support checking for + expected `aud`, `sub` and `iss`. + * Added `WithStrictDecoding` and `WithPaddingAllowed` options to allow + previously global settings to enable base64 strict encoding and the parsing + of base64 strings with padding. The latter is strictly speaking against the + standard, but unfortunately some of the major identity providers issue some + of these incorrect tokens. Both options are disabled by default. + +## Changes to the `Claims` interface + +### Complete Restructuring + +Previously, the claims interface was satisfied with an implementation of a +`Valid() error` function. This had several issues: + * The different claim types (struct claims, map claims, etc.) then contained + similar (but not 100 % identical) code of how this validation was done. 
This + lead to a lot of (almost) duplicate code and was hard to maintain + * It was not really semantically close to what a "claim" (or a set of claims) + really is; which is a list of defined key/value pairs with a certain + semantic meaning. + +Since all the validation functionality is now extracted into the validator, all +`VerifyXXX` and `Valid` functions have been removed from the `Claims` interface. +Instead, the interface now represents a list of getters to retrieve values with +a specific meaning. This allows us to completely decouple the validation logic +with the underlying storage representation of the claim, which could be a +struct, a map or even something stored in a database. + +```go +type Claims interface { + GetExpirationTime() (*NumericDate, error) + GetIssuedAt() (*NumericDate, error) + GetNotBefore() (*NumericDate, error) + GetIssuer() (string, error) + GetSubject() (string, error) + GetAudience() (ClaimStrings, error) +} +``` + +Users that previously directly called the `Valid` function on their claims, +e.g., to perform validation independently of parsing/verifying a token, can now +use the `jwt.NewValidator` function to create a `Validator` independently of the +`Parser`. + +```go +var v = jwt.NewValidator(jwt.WithLeeway(5*time.Second)) +v.Validate(myClaims) +``` + +### Supported Claim Types and Removal of `StandardClaims` + +The two standard claim types supported by this library, `MapClaims` and +`RegisteredClaims` both implement the necessary functions of this interface. The +old `StandardClaims` struct, which has already been deprecated in `v4` is now +removed. + +Users using custom claims, in most cases, will not experience any changes in the +behavior as long as they embedded `RegisteredClaims`. If they created a new +claim type from scratch, they now need to implemented the proper getter +functions. + +### Migrating Application Specific Logic of the old `Valid` + +Previously, users could override the `Valid` method in a custom claim, for +example to extend the validation with application-specific claims. However, this +was always very dangerous, since once could easily disable the standard +validation and signature checking. + +In order to avoid that, while still supporting the use-case, a new +`ClaimsValidator` interface has been introduced. This interface consists of the +`Validate() error` function. If the validator sees, that a `Claims` struct +implements this interface, the errors returned to the `Validate` function will +be *appended* to the regular standard validation. It is not possible to disable +the standard validation anymore (even only by accident). + +Usage examples can be found in [example_test.go](./example_test.go), to build +claims structs like the following. + +```go +// MyCustomClaims includes all registered claims, plus Foo. +type MyCustomClaims struct { + Foo string `json:"foo"` + jwt.RegisteredClaims +} + +// Validate can be used to execute additional application-specific claims +// validation. +func (m MyCustomClaims) Validate() error { + if m.Foo != "bar" { + return errors.New("must be foobar") + } + + return nil +} +``` + +## Changes to the `Token` and `Parser` struct + +The previously global functions `DecodeSegment` and `EncodeSegment` were moved +to the `Parser` and `Token` struct respectively. This will allow us in the +future to configure the behavior of these two based on options supplied on the +parser or the token (creation). 
This also removes two previously global +variables and moves them to parser options `WithStrictDecoding` and +`WithPaddingAllowed`. + +In order to do that, we had to adjust the way signing methods work. Previously +they were given a base64 encoded signature in `Verify` and were expected to +return a base64 encoded version of the signature in `Sign`, both as a `string`. +However, this made it necessary to have `DecodeSegment` and `EncodeSegment` +global and was a less than perfect design because we were repeating +encoding/decoding steps for all signing methods. Now, `Sign` and `Verify` +operate on a decoded signature as a `[]byte`, which feels more natural for a +cryptographic operation anyway. Lastly, `Parse` and `SignedString` take care of +the final encoding/decoding part. + +In addition to that, we also changed the `Signature` field on `Token` from a +`string` to `[]byte` and this is also now populated with the decoded form. This +is also more consistent, because the other parts of the JWT, mainly `Header` and +`Claims` were already stored in decoded form in `Token`. Only the signature was +stored in base64 encoded form, which was redundant with the information in the +`Raw` field, which contains the complete token as base64. + +```go +type Token struct { + Raw string // Raw contains the raw token + Method SigningMethod // Method is the signing method used or to be used + Header map[string]interface{} // Header is the first segment of the token in decoded form + Claims Claims // Claims is the second segment of the token in decoded form + Signature []byte // Signature is the third segment of the token in decoded form + Valid bool // Valid specifies if the token is valid +} +``` + +Most (if not all) of these changes should not impact the normal usage of this +library. Only users directly accessing the `Signature` field as well as +developers of custom signing methods should be affected. + +# Migration Guide (v4.0.0) + +Starting from [v4.0.0](https://github.com/golang-jwt/jwt/releases/tag/v4.0.0), +the import path will be: + + "github.com/golang-jwt/jwt/v4" + +The `/v4` version will be backwards compatible with existing `v3.x.y` tags in +this repo, as well as `github.com/dgrijalva/jwt-go`. For most users this should +be a drop-in replacement, if you're having troubles migrating, please open an +issue. + +You can replace all occurrences of `github.com/dgrijalva/jwt-go` or +`github.com/golang-jwt/jwt` with `github.com/golang-jwt/jwt/v4`, either manually +or by using tools such as `sed` or `gofmt`. + +And then you'd typically run: + +``` +go get github.com/golang-jwt/jwt/v4 +go mod tidy +``` + +# Older releases (before v3.2.0) + +The original migration guide for older releases can be found at +https://github.com/dgrijalva/jwt-go/blob/master/MIGRATION_GUIDE.md. 
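Tying the guide together, the following sketch shows the v5 parsing path it describes: a custom claims type embedding `RegisteredClaims`, an application-specific `Validate` hook, and validation tuned with parser options such as `WithLeeway` and `WithIssuer`. The signing key handling, issuer value, and claim contents are placeholders.

```go
package jwtexample

import (
	"errors"
	"fmt"
	"time"

	"github.com/golang-jwt/jwt/v5"
)

// MyCustomClaims follows the custom-claims pattern from the guide.
type MyCustomClaims struct {
	Foo string `json:"foo"`
	jwt.RegisteredClaims
}

// Validate is appended to the standard validation by the v5 validator.
func (m MyCustomClaims) Validate() error {
	if m.Foo != "bar" {
		return errors.New("must be foobar")
	}
	return nil
}

// ParseExample verifies an HMAC-signed token and returns the custom claims.
func ParseExample(tokenString string, key []byte) (*MyCustomClaims, error) {
	token, err := jwt.ParseWithClaims(tokenString, &MyCustomClaims{},
		func(t *jwt.Token) (interface{}, error) {
			// Reject unexpected algorithms before handing back the key.
			if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok {
				return nil, fmt.Errorf("unexpected signing method %v", t.Header["alg"])
			}
			return key, nil
		},
		// Validation is configured per parse instead of inside Valid().
		jwt.WithLeeway(5*time.Second),
		jwt.WithIssuer("example-issuer"), // placeholder issuer
	)
	if err != nil {
		return nil, err
	}
	return token.Claims.(*MyCustomClaims), nil
}
```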
diff --git a/vendor/github.com/golang-jwt/jwt/v5/README.md b/vendor/github.com/golang-jwt/jwt/v5/README.md new file mode 100644 index 0000000000000..964598a3173e5 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v5/README.md @@ -0,0 +1,167 @@ +# jwt-go + +[![build](https://github.com/golang-jwt/jwt/actions/workflows/build.yml/badge.svg)](https://github.com/golang-jwt/jwt/actions/workflows/build.yml) +[![Go +Reference](https://pkg.go.dev/badge/github.com/golang-jwt/jwt/v5.svg)](https://pkg.go.dev/github.com/golang-jwt/jwt/v5) +[![Coverage Status](https://coveralls.io/repos/github/golang-jwt/jwt/badge.svg?branch=main)](https://coveralls.io/github/golang-jwt/jwt?branch=main) + +A [go](http://www.golang.org) (or 'golang' for search engine friendliness) +implementation of [JSON Web +Tokens](https://datatracker.ietf.org/doc/html/rfc7519). + +Starting with [v4.0.0](https://github.com/golang-jwt/jwt/releases/tag/v4.0.0) +this project adds Go module support, but maintains backwards compatibility with +older `v3.x.y` tags and upstream `github.com/dgrijalva/jwt-go`. See the +[`MIGRATION_GUIDE.md`](./MIGRATION_GUIDE.md) for more information. Version +v5.0.0 introduces major improvements to the validation of tokens, but is not +entirely backwards compatible. + +> After the original author of the library suggested migrating the maintenance +> of `jwt-go`, a dedicated team of open source maintainers decided to clone the +> existing library into this repository. See +> [dgrijalva/jwt-go#462](https://github.com/dgrijalva/jwt-go/issues/462) for a +> detailed discussion on this topic. + + +**SECURITY NOTICE:** Some older versions of Go have a security issue in the +crypto/elliptic. Recommendation is to upgrade to at least 1.15 See issue +[dgrijalva/jwt-go#216](https://github.com/dgrijalva/jwt-go/issues/216) for more +detail. + +**SECURITY NOTICE:** It's important that you [validate the `alg` presented is +what you +expect](https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/). +This library attempts to make it easy to do the right thing by requiring key +types match the expected alg, but you should take the extra step to verify it in +your usage. See the examples provided. + +### Supported Go versions + +Our support of Go versions is aligned with Go's [version release +policy](https://golang.org/doc/devel/release#policy). So we will support a major +version of Go until there are two newer major releases. We no longer support +building jwt-go with unsupported Go versions, as these contain security +vulnerabilities which will not be fixed. + +## What the heck is a JWT? + +JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web +Tokens. + +In short, it's a signed JSON object that does something useful (for example, +authentication). It's commonly used for `Bearer` tokens in Oauth 2. A token is +made of three parts, separated by `.`'s. The first two parts are JSON objects, +that have been [base64url](https://datatracker.ietf.org/doc/html/rfc4648) +encoded. The last part is the signature, encoded the same way. + +The first part is called the header. It contains the necessary information for +verifying the last part, the signature. For example, which encryption method +was used for signing and what key was used. + +The part in the middle is the interesting bit. It's called the Claims and +contains the actual stuff you care about. Refer to [RFC +7519](https://datatracker.ietf.org/doc/html/rfc7519) for information about +reserved keys and the proper way to add your own. 
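To make the header/claims/signature split concrete, here is a short illustrative snippet (placeholder secret and claims, not taken from the upstream examples) that signs a compact HS256 token and then parses and verifies it:

```go
package main

import (
	"fmt"
	"time"

	"github.com/golang-jwt/jwt/v5"
)

func main() {
	secret := []byte("placeholder-secret") // demo value only

	// Build header+claims and sign them: the result is header.claims.signature.
	token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{
		"sub": "1234567890",
		"exp": time.Now().Add(time.Hour).Unix(),
	})
	signed, err := token.SignedString(secret)
	if err != nil {
		panic(err)
	}

	// Parse and verify the compact serialization we just produced.
	parsed, err := jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) {
		return secret, nil
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("segments:", signed)
	fmt.Println("token valid:", parsed.Valid)
}
```

Printing the signed string shows the three dot-separated base64url segments described above.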
+ +## What's in the box? + +This library supports the parsing and verification as well as the generation and +signing of JWTs. Current supported signing algorithms are HMAC SHA, RSA, +RSA-PSS, and ECDSA, though hooks are present for adding your own. + +## Installation Guidelines + +1. To install the jwt package, you first need to have + [Go](https://go.dev/doc/install) installed, then you can use the command + below to add `jwt-go` as a dependency in your Go program. + +```sh +go get -u github.com/golang-jwt/jwt/v5 +``` + +2. Import it in your code: + +```go +import "github.com/golang-jwt/jwt/v5" +``` + +## Usage + +A detailed usage guide, including how to sign and verify tokens can be found on +our [documentation website](https://golang-jwt.github.io/jwt/usage/create/). + +## Examples + +See [the project documentation](https://pkg.go.dev/github.com/golang-jwt/jwt/v5) +for examples of usage: + +* [Simple example of parsing and validating a + token](https://pkg.go.dev/github.com/golang-jwt/jwt/v5#example-Parse-Hmac) +* [Simple example of building and signing a + token](https://pkg.go.dev/github.com/golang-jwt/jwt/v5#example-New-Hmac) +* [Directory of + Examples](https://pkg.go.dev/github.com/golang-jwt/jwt/v5#pkg-examples) + +## Compliance + +This library was last reviewed to comply with [RFC +7519](https://datatracker.ietf.org/doc/html/rfc7519) dated May 2015 with a few +notable differences: + +* In order to protect against accidental use of [Unsecured + JWTs](https://datatracker.ietf.org/doc/html/rfc7519#section-6), tokens using + `alg=none` will only be accepted if the constant + `jwt.UnsafeAllowNoneSignatureType` is provided as the key. + +## Project Status & Versioning + +This library is considered production ready. Feedback and feature requests are +appreciated. The API should be considered stable. There should be very few +backwards-incompatible changes outside of major version updates (and only with +good reason). + +This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull +requests will land on `main`. Periodically, versions will be tagged from +`main`. You can find all the releases on [the project releases +page](https://github.com/golang-jwt/jwt/releases). + +**BREAKING CHANGES:*** A full list of breaking changes is available in +`VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating +your code. + +## Extensions + +This library publishes all the necessary components for adding your own signing +methods or key functions. Simply implement the `SigningMethod` interface and +register a factory method using `RegisterSigningMethod` or provide a +`jwt.Keyfunc`. + +A common use case would be integrating with different 3rd party signature +providers, like key management services from various cloud providers or Hardware +Security Modules (HSMs) or to implement additional standards. 
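As a sketch of the extension point just described (and before the directory of existing third-party extensions that follows), here is a toy `SigningMethod`: HMAC-SHA256 under an invented `TOY256` alg name, written against the v5 `Sign`/`Verify` signatures, which exchange decoded `[]byte` signatures, and registered with `RegisterSigningMethod`. The alg name and key are placeholders; a real extension would call out to a KMS or HSM instead.

```go
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"errors"
	"fmt"

	"github.com/golang-jwt/jwt/v5"
)

// toyMethod is a deliberately simple HMAC-SHA256 method used only to show the
// SigningMethod surface; don't register invented alg names in production.
type toyMethod struct{}

func (m *toyMethod) Alg() string { return "TOY256" }

// Sign returns the decoded (raw) signature bytes for the signing string.
func (m *toyMethod) Sign(signingString string, key interface{}) ([]byte, error) {
	k, ok := key.([]byte)
	if !ok {
		return nil, errors.New("TOY256 expects a []byte key")
	}
	h := hmac.New(sha256.New, k)
	h.Write([]byte(signingString))
	return h.Sum(nil), nil
}

// Verify recomputes the signature and compares it in constant time.
func (m *toyMethod) Verify(signingString string, sig []byte, key interface{}) error {
	expected, err := m.Sign(signingString, key)
	if err != nil {
		return err
	}
	if !hmac.Equal(expected, sig) {
		return errors.New("TOY256 signature mismatch")
	}
	return nil
}

func init() {
	// Make the method discoverable by the parser and jwt.GetSigningMethod.
	jwt.RegisterSigningMethod("TOY256", func() jwt.SigningMethod { return &toyMethod{} })
}

func main() {
	token := jwt.NewWithClaims(&toyMethod{}, jwt.MapClaims{"sub": "demo"})
	signed, err := token.SignedString([]byte("placeholder-key"))
	if err != nil {
		panic(err)
	}
	fmt.Println(signed)
}
```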
+ +| Extension | Purpose | Repo | +| --------- | -------------------------------------------------------------------------------------------------------- | ------------------------------------------ | +| GCP | Integrates with multiple Google Cloud Platform signing tools (AppEngine, IAM API, Cloud KMS) | https://github.com/someone1/gcp-jwt-go | +| AWS | Integrates with AWS Key Management Service, KMS | https://github.com/matelang/jwt-go-aws-kms | +| JWKS | Provides support for JWKS ([RFC 7517](https://datatracker.ietf.org/doc/html/rfc7517)) as a `jwt.Keyfunc` | https://github.com/MicahParks/keyfunc | + +*Disclaimer*: Unless otherwise specified, these integrations are maintained by +third parties and should not be considered as a primary offer by any of the +mentioned cloud providers + +## More + +Go package documentation can be found [on +pkg.go.dev](https://pkg.go.dev/github.com/golang-jwt/jwt/v5). Additional +documentation can be found on [our project +page](https://golang-jwt.github.io/jwt/). + +The command line utility included in this project (cmd/jwt) provides a +straightforward example of token creation and parsing as well as a useful tool +for debugging your own integration. You'll also find several implementation +examples in the documentation. + +[golang-jwt](https://github.com/orgs/golang-jwt) incorporates a modified version +of the JWT logo, which is distributed under the terms of the [MIT +License](https://github.com/jsonwebtoken/jsonwebtoken.github.io/blob/master/LICENSE.txt). diff --git a/vendor/github.com/golang-jwt/jwt/v5/SECURITY.md b/vendor/github.com/golang-jwt/jwt/v5/SECURITY.md new file mode 100644 index 0000000000000..b08402c3427f4 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v5/SECURITY.md @@ -0,0 +1,19 @@ +# Security Policy + +## Supported Versions + +As of February 2022 (and until this document is updated), the latest version `v4` is supported. + +## Reporting a Vulnerability + +If you think you found a vulnerability, and even if you are not sure, please report it to jwt-go-security@googlegroups.com or one of the other [golang-jwt maintainers](https://github.com/orgs/golang-jwt/people). Please try be explicit, describe steps to reproduce the security issue with code example(s). + +You will receive a response within a timely manner. If the issue is confirmed, we will do our best to release a patch as soon as possible given the complexity of the problem. + +## Public Discussions + +Please avoid publicly discussing a potential security vulnerability. + +Let's take this offline and find a solution first, this limits the potential impact as much as possible. + +We appreciate your help! diff --git a/vendor/github.com/golang-jwt/jwt/v5/VERSION_HISTORY.md b/vendor/github.com/golang-jwt/jwt/v5/VERSION_HISTORY.md new file mode 100644 index 0000000000000..b5039e49c102e --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v5/VERSION_HISTORY.md @@ -0,0 +1,137 @@ +# `jwt-go` Version History + +The following version history is kept for historic purposes. To retrieve the current changes of each version, please refer to the change-log of the specific release versions on https://github.com/golang-jwt/jwt/releases. + +## 4.0.0 + +* Introduces support for Go modules. The `v4` version will be backwards compatible with `v3.x.y`. + +## 3.2.2 + +* Starting from this release, we are adopting the policy to support the most 2 recent versions of Go currently available. By the time of this release, this is Go 1.15 and 1.16 ([#28](https://github.com/golang-jwt/jwt/pull/28)). 
+* Fixed a potential issue that could occur when the verification of `exp`, `iat` or `nbf` was not required and contained invalid contents, i.e. non-numeric/date. Thanks for @thaJeztah for making us aware of that and @giorgos-f3 for originally reporting it to the formtech fork ([#40](https://github.com/golang-jwt/jwt/pull/40)). +* Added support for EdDSA / ED25519 ([#36](https://github.com/golang-jwt/jwt/pull/36)). +* Optimized allocations ([#33](https://github.com/golang-jwt/jwt/pull/33)). + +## 3.2.1 + +* **Import Path Change**: See MIGRATION_GUIDE.md for tips on updating your code + * Changed the import path from `github.com/dgrijalva/jwt-go` to `github.com/golang-jwt/jwt` +* Fixed type confusing issue between `string` and `[]string` in `VerifyAudience` ([#12](https://github.com/golang-jwt/jwt/pull/12)). This fixes CVE-2020-26160 + +#### 3.2.0 + +* Added method `ParseUnverified` to allow users to split up the tasks of parsing and validation +* HMAC signing method returns `ErrInvalidKeyType` instead of `ErrInvalidKey` where appropriate +* Added options to `request.ParseFromRequest`, which allows for an arbitrary list of modifiers to parsing behavior. Initial set include `WithClaims` and `WithParser`. Existing usage of this function will continue to work as before. +* Deprecated `ParseFromRequestWithClaims` to simplify API in the future. + +#### 3.1.0 + +* Improvements to `jwt` command line tool +* Added `SkipClaimsValidation` option to `Parser` +* Documentation updates + +#### 3.0.0 + +* **Compatibility Breaking Changes**: See MIGRATION_GUIDE.md for tips on updating your code + * Dropped support for `[]byte` keys when using RSA signing methods. This convenience feature could contribute to security vulnerabilities involving mismatched key types with signing methods. + * `ParseFromRequest` has been moved to `request` subpackage and usage has changed + * The `Claims` property on `Token` is now type `Claims` instead of `map[string]interface{}`. The default value is type `MapClaims`, which is an alias to `map[string]interface{}`. This makes it possible to use a custom type when decoding claims. +* Other Additions and Changes + * Added `Claims` interface type to allow users to decode the claims into a custom type + * Added `ParseWithClaims`, which takes a third argument of type `Claims`. Use this function instead of `Parse` if you have a custom type you'd like to decode into. + * Dramatically improved the functionality and flexibility of `ParseFromRequest`, which is now in the `request` subpackage + * Added `ParseFromRequestWithClaims` which is the `FromRequest` equivalent of `ParseWithClaims` + * Added new interface type `Extractor`, which is used for extracting JWT strings from http requests. Used with `ParseFromRequest` and `ParseFromRequestWithClaims`. + * Added several new, more specific, validation errors to error type bitmask + * Moved examples from README to executable example files + * Signing method registry is now thread safe + * Added new property to `ValidationError`, which contains the raw error returned by calls made by parse/verify (such as those returned by keyfunc or json parser) + +#### 2.7.0 + +This will likely be the last backwards compatible release before 3.0.0, excluding essential bug fixes. 
+ +* Added new option `-show` to the `jwt` command that will just output the decoded token without verifying +* Error text for expired tokens includes how long it's been expired +* Fixed incorrect error returned from `ParseRSAPublicKeyFromPEM` +* Documentation updates + +#### 2.6.0 + +* Exposed inner error within ValidationError +* Fixed validation errors when using UseJSONNumber flag +* Added several unit tests + +#### 2.5.0 + +* Added support for signing method none. You shouldn't use this. The API tries to make this clear. +* Updated/fixed some documentation +* Added more helpful error message when trying to parse tokens that begin with `BEARER ` + +#### 2.4.0 + +* Added new type, Parser, to allow for configuration of various parsing parameters + * You can now specify a list of valid signing methods. Anything outside this set will be rejected. + * You can now opt to use the `json.Number` type instead of `float64` when parsing token JSON +* Added support for [Travis CI](https://travis-ci.org/dgrijalva/jwt-go) +* Fixed some bugs with ECDSA parsing + +#### 2.3.0 + +* Added support for ECDSA signing methods +* Added support for RSA PSS signing methods (requires go v1.4) + +#### 2.2.0 + +* Gracefully handle a `nil` `Keyfunc` being passed to `Parse`. Result will now be the parsed token and an error, instead of a panic. + +#### 2.1.0 + +Backwards compatible API change that was missed in 2.0.0. + +* The `SignedString` method on `Token` now takes `interface{}` instead of `[]byte` + +#### 2.0.0 + +There were two major reasons for breaking backwards compatibility with this update. The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations. There will likely be no required code changes to support this change. + +The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibilty has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`. + +It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`. + +* **Compatibility Breaking Changes** + * `SigningMethodHS256` is now `*SigningMethodHMAC` instead of `type struct` + * `SigningMethodRS256` is now `*SigningMethodRSA` instead of `type struct` + * `KeyFunc` now returns `interface{}` instead of `[]byte` + * `SigningMethod.Sign` now takes `interface{}` instead of `[]byte` for the key + * `SigningMethod.Verify` now takes `interface{}` instead of `[]byte` for the key +* Renamed type `SigningMethodHS256` to `SigningMethodHMAC`. Specific sizes are now just instances of this type. + * Added public package global `SigningMethodHS256` + * Added public package global `SigningMethodHS384` + * Added public package global `SigningMethodHS512` +* Renamed type `SigningMethodRS256` to `SigningMethodRSA`. Specific sizes are now just instances of this type. 
+ * Added public package global `SigningMethodRS256` + * Added public package global `SigningMethodRS384` + * Added public package global `SigningMethodRS512` +* Moved sample private key for HMAC tests from an inline value to a file on disk. Value is unchanged. +* Refactored the RSA implementation to be easier to read +* Exposed helper methods `ParseRSAPrivateKeyFromPEM` and `ParseRSAPublicKeyFromPEM` + +## 1.0.2 + +* Fixed bug in parsing public keys from certificates +* Added more tests around the parsing of keys for RS256 +* Code refactoring in RS256 implementation. No functional changes + +## 1.0.1 + +* Fixed panic if RS256 signing method was passed an invalid key + +## 1.0.0 + +* First versioned release +* API stabilized +* Supports creating, signing, parsing, and validating JWT tokens +* Supports RS256 and HS256 signing methods diff --git a/vendor/github.com/golang-jwt/jwt/v5/claims.go b/vendor/github.com/golang-jwt/jwt/v5/claims.go new file mode 100644 index 0000000000000..d50ff3dad8297 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v5/claims.go @@ -0,0 +1,16 @@ +package jwt + +// Claims represent any form of a JWT Claims Set according to +// https://datatracker.ietf.org/doc/html/rfc7519#section-4. In order to have a +// common basis for validation, it is required that an implementation is able to +// supply at least the claim names provided in +// https://datatracker.ietf.org/doc/html/rfc7519#section-4.1 namely `exp`, +// `iat`, `nbf`, `iss`, `sub` and `aud`. +type Claims interface { + GetExpirationTime() (*NumericDate, error) + GetIssuedAt() (*NumericDate, error) + GetNotBefore() (*NumericDate, error) + GetIssuer() (string, error) + GetSubject() (string, error) + GetAudience() (ClaimStrings, error) +} diff --git a/vendor/github.com/golang-jwt/jwt/v5/doc.go b/vendor/github.com/golang-jwt/jwt/v5/doc.go new file mode 100644 index 0000000000000..a86dc1a3b348c --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v5/doc.go @@ -0,0 +1,4 @@ +// Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html +// +// See README.md for more info. +package jwt diff --git a/vendor/github.com/golang-jwt/jwt/v5/ecdsa.go b/vendor/github.com/golang-jwt/jwt/v5/ecdsa.go new file mode 100644 index 0000000000000..ca85659ba4bb5 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v5/ecdsa.go @@ -0,0 +1,134 @@ +package jwt + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rand" + "errors" + "math/big" +) + +var ( + // Sadly this is missing from crypto/ecdsa compared to crypto/rsa + ErrECDSAVerification = errors.New("crypto/ecdsa: verification error") +) + +// SigningMethodECDSA implements the ECDSA family of signing methods. 
+// Expects *ecdsa.PrivateKey for signing and *ecdsa.PublicKey for verification +type SigningMethodECDSA struct { + Name string + Hash crypto.Hash + KeySize int + CurveBits int +} + +// Specific instances for EC256 and company +var ( + SigningMethodES256 *SigningMethodECDSA + SigningMethodES384 *SigningMethodECDSA + SigningMethodES512 *SigningMethodECDSA +) + +func init() { + // ES256 + SigningMethodES256 = &SigningMethodECDSA{"ES256", crypto.SHA256, 32, 256} + RegisterSigningMethod(SigningMethodES256.Alg(), func() SigningMethod { + return SigningMethodES256 + }) + + // ES384 + SigningMethodES384 = &SigningMethodECDSA{"ES384", crypto.SHA384, 48, 384} + RegisterSigningMethod(SigningMethodES384.Alg(), func() SigningMethod { + return SigningMethodES384 + }) + + // ES512 + SigningMethodES512 = &SigningMethodECDSA{"ES512", crypto.SHA512, 66, 521} + RegisterSigningMethod(SigningMethodES512.Alg(), func() SigningMethod { + return SigningMethodES512 + }) +} + +func (m *SigningMethodECDSA) Alg() string { + return m.Name +} + +// Verify implements token verification for the SigningMethod. +// For this verify method, key must be an ecdsa.PublicKey struct +func (m *SigningMethodECDSA) Verify(signingString string, sig []byte, key interface{}) error { + // Get the key + var ecdsaKey *ecdsa.PublicKey + switch k := key.(type) { + case *ecdsa.PublicKey: + ecdsaKey = k + default: + return newError("ECDSA verify expects *ecsda.PublicKey", ErrInvalidKeyType) + } + + if len(sig) != 2*m.KeySize { + return ErrECDSAVerification + } + + r := big.NewInt(0).SetBytes(sig[:m.KeySize]) + s := big.NewInt(0).SetBytes(sig[m.KeySize:]) + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Verify the signature + if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus { + return nil + } + + return ErrECDSAVerification +} + +// Sign implements token signing for the SigningMethod. +// For this signing method, key must be an ecdsa.PrivateKey struct +func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) ([]byte, error) { + // Get the key + var ecdsaKey *ecdsa.PrivateKey + switch k := key.(type) { + case *ecdsa.PrivateKey: + ecdsaKey = k + default: + return nil, newError("ECDSA sign expects *ecsda.PrivateKey", ErrInvalidKeyType) + } + + // Create the hasher + if !m.Hash.Available() { + return nil, ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return r, s + if r, s, err := ecdsa.Sign(rand.Reader, ecdsaKey, hasher.Sum(nil)); err == nil { + curveBits := ecdsaKey.Curve.Params().BitSize + + if m.CurveBits != curveBits { + return nil, ErrInvalidKey + } + + keyBytes := curveBits / 8 + if curveBits%8 > 0 { + keyBytes += 1 + } + + // We serialize the outputs (r and s) into big-endian byte arrays + // padded with zeros on the left to make sure the sizes work out. + // Output must be 2*keyBytes long. + out := make([]byte, 2*keyBytes) + r.FillBytes(out[0:keyBytes]) // r is assigned to the first half of output. + s.FillBytes(out[keyBytes:]) // s is assigned to the second half of output. 
+ + return out, nil + } else { + return nil, err + } +} diff --git a/vendor/github.com/golang-jwt/jwt/v5/ecdsa_utils.go b/vendor/github.com/golang-jwt/jwt/v5/ecdsa_utils.go new file mode 100644 index 0000000000000..5700636d35b6c --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v5/ecdsa_utils.go @@ -0,0 +1,69 @@ +package jwt + +import ( + "crypto/ecdsa" + "crypto/x509" + "encoding/pem" + "errors" +) + +var ( + ErrNotECPublicKey = errors.New("key is not a valid ECDSA public key") + ErrNotECPrivateKey = errors.New("key is not a valid ECDSA private key") +) + +// ParseECPrivateKeyFromPEM parses a PEM encoded Elliptic Curve Private Key Structure +func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil { + if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { + return nil, err + } + } + + var pkey *ecdsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok { + return nil, ErrNotECPrivateKey + } + + return pkey, nil +} + +// ParseECPublicKeyFromPEM parses a PEM encoded PKCS1 or PKCS8 public key +func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { + if cert, err := x509.ParseCertificate(block.Bytes); err == nil { + parsedKey = cert.PublicKey + } else { + return nil, err + } + } + + var pkey *ecdsa.PublicKey + var ok bool + if pkey, ok = parsedKey.(*ecdsa.PublicKey); !ok { + return nil, ErrNotECPublicKey + } + + return pkey, nil +} diff --git a/vendor/github.com/golang-jwt/jwt/v5/ed25519.go b/vendor/github.com/golang-jwt/jwt/v5/ed25519.go new file mode 100644 index 0000000000000..c2138119e5120 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v5/ed25519.go @@ -0,0 +1,79 @@ +package jwt + +import ( + "crypto" + "crypto/ed25519" + "crypto/rand" + "errors" +) + +var ( + ErrEd25519Verification = errors.New("ed25519: verification error") +) + +// SigningMethodEd25519 implements the EdDSA family. +// Expects ed25519.PrivateKey for signing and ed25519.PublicKey for verification +type SigningMethodEd25519 struct{} + +// Specific instance for EdDSA +var ( + SigningMethodEdDSA *SigningMethodEd25519 +) + +func init() { + SigningMethodEdDSA = &SigningMethodEd25519{} + RegisterSigningMethod(SigningMethodEdDSA.Alg(), func() SigningMethod { + return SigningMethodEdDSA + }) +} + +func (m *SigningMethodEd25519) Alg() string { + return "EdDSA" +} + +// Verify implements token verification for the SigningMethod. 
+// For this verify method, key must be an ed25519.PublicKey +func (m *SigningMethodEd25519) Verify(signingString string, sig []byte, key interface{}) error { + var ed25519Key ed25519.PublicKey + var ok bool + + if ed25519Key, ok = key.(ed25519.PublicKey); !ok { + return newError("Ed25519 verify expects ed25519.PublicKey", ErrInvalidKeyType) + } + + if len(ed25519Key) != ed25519.PublicKeySize { + return ErrInvalidKey + } + + // Verify the signature + if !ed25519.Verify(ed25519Key, []byte(signingString), sig) { + return ErrEd25519Verification + } + + return nil +} + +// Sign implements token signing for the SigningMethod. +// For this signing method, key must be an ed25519.PrivateKey +func (m *SigningMethodEd25519) Sign(signingString string, key interface{}) ([]byte, error) { + var ed25519Key crypto.Signer + var ok bool + + if ed25519Key, ok = key.(crypto.Signer); !ok { + return nil, newError("Ed25519 sign expects crypto.Signer", ErrInvalidKeyType) + } + + if _, ok := ed25519Key.Public().(ed25519.PublicKey); !ok { + return nil, ErrInvalidKey + } + + // Sign the string and return the result. ed25519 performs a two-pass hash + // as part of its algorithm. Therefore, we need to pass a non-prehashed + // message into the Sign function, as indicated by crypto.Hash(0) + sig, err := ed25519Key.Sign(rand.Reader, []byte(signingString), crypto.Hash(0)) + if err != nil { + return nil, err + } + + return sig, nil +} diff --git a/vendor/github.com/golang-jwt/jwt/v5/ed25519_utils.go b/vendor/github.com/golang-jwt/jwt/v5/ed25519_utils.go new file mode 100644 index 0000000000000..cdb5e68e8767a --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v5/ed25519_utils.go @@ -0,0 +1,64 @@ +package jwt + +import ( + "crypto" + "crypto/ed25519" + "crypto/x509" + "encoding/pem" + "errors" +) + +var ( + ErrNotEdPrivateKey = errors.New("key is not a valid Ed25519 private key") + ErrNotEdPublicKey = errors.New("key is not a valid Ed25519 public key") +) + +// ParseEdPrivateKeyFromPEM parses a PEM-encoded Edwards curve private key +func ParseEdPrivateKeyFromPEM(key []byte) (crypto.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { + return nil, err + } + + var pkey ed25519.PrivateKey + var ok bool + if pkey, ok = parsedKey.(ed25519.PrivateKey); !ok { + return nil, ErrNotEdPrivateKey + } + + return pkey, nil +} + +// ParseEdPublicKeyFromPEM parses a PEM-encoded Edwards curve public key +func ParseEdPublicKeyFromPEM(key []byte) (crypto.PublicKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { + return nil, err + } + + var pkey ed25519.PublicKey + var ok bool + if pkey, ok = parsedKey.(ed25519.PublicKey); !ok { + return nil, ErrNotEdPublicKey + } + + return pkey, nil +} diff --git a/vendor/github.com/golang-jwt/jwt/v5/errors.go b/vendor/github.com/golang-jwt/jwt/v5/errors.go new file mode 100644 index 0000000000000..23bb616ddde3a --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v5/errors.go @@ -0,0 +1,49 @@ +package jwt + +import ( + "errors" + "strings" +) + +var ( + ErrInvalidKey = errors.New("key is invalid") + ErrInvalidKeyType = 
errors.New("key is of invalid type") + ErrHashUnavailable = errors.New("the requested hash function is unavailable") + ErrTokenMalformed = errors.New("token is malformed") + ErrTokenUnverifiable = errors.New("token is unverifiable") + ErrTokenSignatureInvalid = errors.New("token signature is invalid") + ErrTokenRequiredClaimMissing = errors.New("token is missing required claim") + ErrTokenInvalidAudience = errors.New("token has invalid audience") + ErrTokenExpired = errors.New("token is expired") + ErrTokenUsedBeforeIssued = errors.New("token used before issued") + ErrTokenInvalidIssuer = errors.New("token has invalid issuer") + ErrTokenInvalidSubject = errors.New("token has invalid subject") + ErrTokenNotValidYet = errors.New("token is not valid yet") + ErrTokenInvalidId = errors.New("token has invalid id") + ErrTokenInvalidClaims = errors.New("token has invalid claims") + ErrInvalidType = errors.New("invalid type for claim") +) + +// joinedError is an error type that works similar to what [errors.Join] +// produces, with the exception that it has a nice error string; mainly its +// error messages are concatenated using a comma, rather than a newline. +type joinedError struct { + errs []error +} + +func (je joinedError) Error() string { + msg := []string{} + for _, err := range je.errs { + msg = append(msg, err.Error()) + } + + return strings.Join(msg, ", ") +} + +// joinErrors joins together multiple errors. Useful for scenarios where +// multiple errors next to each other occur, e.g., in claims validation. +func joinErrors(errs ...error) error { + return &joinedError{ + errs: errs, + } +} diff --git a/vendor/github.com/golang-jwt/jwt/v5/errors_go1_20.go b/vendor/github.com/golang-jwt/jwt/v5/errors_go1_20.go new file mode 100644 index 0000000000000..a893d355e1abe --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v5/errors_go1_20.go @@ -0,0 +1,47 @@ +//go:build go1.20 +// +build go1.20 + +package jwt + +import ( + "fmt" +) + +// Unwrap implements the multiple error unwrapping for this error type, which is +// possible in Go 1.20. +func (je joinedError) Unwrap() []error { + return je.errs +} + +// newError creates a new error message with a detailed error message. The +// message will be prefixed with the contents of the supplied error type. +// Additionally, more errors, that provide more context can be supplied which +// will be appended to the message. This makes use of Go 1.20's possibility to +// include more than one %w formatting directive in [fmt.Errorf]. +// +// For example, +// +// newError("no keyfunc was provided", ErrTokenUnverifiable) +// +// will produce the error string +// +// "token is unverifiable: no keyfunc was provided" +func newError(message string, err error, more ...error) error { + var format string + var args []any + if message != "" { + format = "%w: %s" + args = []any{err, message} + } else { + format = "%w" + args = []any{err} + } + + for _, e := range more { + format += ": %w" + args = append(args, e) + } + + err = fmt.Errorf(format, args...) 
+ return err +} diff --git a/vendor/github.com/golang-jwt/jwt/v5/errors_go_other.go b/vendor/github.com/golang-jwt/jwt/v5/errors_go_other.go new file mode 100644 index 0000000000000..2ad542f00ca3e --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v5/errors_go_other.go @@ -0,0 +1,78 @@ +//go:build !go1.20 +// +build !go1.20 + +package jwt + +import ( + "errors" + "fmt" +) + +// Is implements checking for multiple errors using [errors.Is], since multiple +// error unwrapping is not possible in versions less than Go 1.20. +func (je joinedError) Is(err error) bool { + for _, e := range je.errs { + if errors.Is(e, err) { + return true + } + } + + return false +} + +// wrappedErrors is a workaround for wrapping multiple errors in environments +// where Go 1.20 is not available. It basically uses the already implemented +// functionality of joinedError to handle multiple errors with supplies a +// custom error message that is identical to the one we produce in Go 1.20 using +// multiple %w directives. +type wrappedErrors struct { + msg string + joinedError +} + +// Error returns the stored error string +func (we wrappedErrors) Error() string { + return we.msg +} + +// newError creates a new error message with a detailed error message. The +// message will be prefixed with the contents of the supplied error type. +// Additionally, more errors, that provide more context can be supplied which +// will be appended to the message. Since we cannot use of Go 1.20's possibility +// to include more than one %w formatting directive in [fmt.Errorf], we have to +// emulate that. +// +// For example, +// +// newError("no keyfunc was provided", ErrTokenUnverifiable) +// +// will produce the error string +// +// "token is unverifiable: no keyfunc was provided" +func newError(message string, err error, more ...error) error { + // We cannot wrap multiple errors here with %w, so we have to be a little + // bit creative. Basically, we are using %s instead of %w to produce the + // same error message and then throw the result into a custom error struct. + var format string + var args []any + if message != "" { + format = "%s: %s" + args = []any{err, message} + } else { + format = "%s" + args = []any{err} + } + errs := []error{err} + + for _, e := range more { + format += ": %s" + args = append(args, e) + errs = append(errs, e) + } + + err = &wrappedErrors{ + msg: fmt.Sprintf(format, args...), + joinedError: joinedError{errs: errs}, + } + return err +} diff --git a/vendor/github.com/golang-jwt/jwt/v5/hmac.go b/vendor/github.com/golang-jwt/jwt/v5/hmac.go new file mode 100644 index 0000000000000..96c62722d18d9 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v5/hmac.go @@ -0,0 +1,104 @@ +package jwt + +import ( + "crypto" + "crypto/hmac" + "errors" +) + +// SigningMethodHMAC implements the HMAC-SHA family of signing methods. 
+// Expects key type of []byte for both signing and validation +type SigningMethodHMAC struct { + Name string + Hash crypto.Hash +} + +// Specific instances for HS256 and company +var ( + SigningMethodHS256 *SigningMethodHMAC + SigningMethodHS384 *SigningMethodHMAC + SigningMethodHS512 *SigningMethodHMAC + ErrSignatureInvalid = errors.New("signature is invalid") +) + +func init() { + // HS256 + SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256} + RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod { + return SigningMethodHS256 + }) + + // HS384 + SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384} + RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod { + return SigningMethodHS384 + }) + + // HS512 + SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512} + RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod { + return SigningMethodHS512 + }) +} + +func (m *SigningMethodHMAC) Alg() string { + return m.Name +} + +// Verify implements token verification for the SigningMethod. Returns nil if +// the signature is valid. Key must be []byte. +// +// Note it is not advised to provide a []byte which was converted from a 'human +// readable' string using a subset of ASCII characters. To maximize entropy, you +// should ideally be providing a []byte key which was produced from a +// cryptographically random source, e.g. crypto/rand. Additional information +// about this, and why we intentionally are not supporting string as a key can +// be found on our usage guide +// https://golang-jwt.github.io/jwt/usage/signing_methods/#signing-methods-and-key-types. +func (m *SigningMethodHMAC) Verify(signingString string, sig []byte, key interface{}) error { + // Verify the key is the right type + keyBytes, ok := key.([]byte) + if !ok { + return newError("HMAC verify expects []byte", ErrInvalidKeyType) + } + + // Can we use the specified hashing method? + if !m.Hash.Available() { + return ErrHashUnavailable + } + + // This signing method is symmetric, so we validate the signature + // by reproducing the signature from the signing string and key, then + // comparing that against the provided signature. + hasher := hmac.New(m.Hash.New, keyBytes) + hasher.Write([]byte(signingString)) + if !hmac.Equal(sig, hasher.Sum(nil)) { + return ErrSignatureInvalid + } + + // No validation errors. Signature is good. + return nil +} + +// Sign implements token signing for the SigningMethod. Key must be []byte. +// +// Note it is not advised to provide a []byte which was converted from a 'human +// readable' string using a subset of ASCII characters. To maximize entropy, you +// should ideally be providing a []byte key which was produced from a +// cryptographically random source, e.g. crypto/rand. Additional information +// about this, and why we intentionally are not supporting string as a key can +// be found on our usage guide https://golang-jwt.github.io/jwt/usage/signing_methods/. 
+func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) ([]byte, error) { + if keyBytes, ok := key.([]byte); ok { + if !m.Hash.Available() { + return nil, newError("HMAC sign expects []byte", ErrInvalidKeyType) + } + + hasher := hmac.New(m.Hash.New, keyBytes) + hasher.Write([]byte(signingString)) + + return hasher.Sum(nil), nil + } + + return nil, ErrInvalidKeyType +} diff --git a/vendor/github.com/golang-jwt/jwt/v5/map_claims.go b/vendor/github.com/golang-jwt/jwt/v5/map_claims.go new file mode 100644 index 0000000000000..b2b51a1f80657 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v5/map_claims.go @@ -0,0 +1,109 @@ +package jwt + +import ( + "encoding/json" + "fmt" +) + +// MapClaims is a claims type that uses the map[string]interface{} for JSON +// decoding. This is the default claims type if you don't supply one +type MapClaims map[string]interface{} + +// GetExpirationTime implements the Claims interface. +func (m MapClaims) GetExpirationTime() (*NumericDate, error) { + return m.parseNumericDate("exp") +} + +// GetNotBefore implements the Claims interface. +func (m MapClaims) GetNotBefore() (*NumericDate, error) { + return m.parseNumericDate("nbf") +} + +// GetIssuedAt implements the Claims interface. +func (m MapClaims) GetIssuedAt() (*NumericDate, error) { + return m.parseNumericDate("iat") +} + +// GetAudience implements the Claims interface. +func (m MapClaims) GetAudience() (ClaimStrings, error) { + return m.parseClaimsString("aud") +} + +// GetIssuer implements the Claims interface. +func (m MapClaims) GetIssuer() (string, error) { + return m.parseString("iss") +} + +// GetSubject implements the Claims interface. +func (m MapClaims) GetSubject() (string, error) { + return m.parseString("sub") +} + +// parseNumericDate tries to parse a key in the map claims type as a number +// date. This will succeed, if the underlying type is either a [float64] or a +// [json.Number]. Otherwise, nil will be returned. +func (m MapClaims) parseNumericDate(key string) (*NumericDate, error) { + v, ok := m[key] + if !ok { + return nil, nil + } + + switch exp := v.(type) { + case float64: + if exp == 0 { + return nil, nil + } + + return newNumericDateFromSeconds(exp), nil + case json.Number: + v, _ := exp.Float64() + + return newNumericDateFromSeconds(v), nil + } + + return nil, newError(fmt.Sprintf("%s is invalid", key), ErrInvalidType) +} + +// parseClaimsString tries to parse a key in the map claims type as a +// [ClaimsStrings] type, which can either be a string or an array of string. +func (m MapClaims) parseClaimsString(key string) (ClaimStrings, error) { + var cs []string + switch v := m[key].(type) { + case string: + cs = append(cs, v) + case []string: + cs = v + case []interface{}: + for _, a := range v { + vs, ok := a.(string) + if !ok { + return nil, newError(fmt.Sprintf("%s is invalid", key), ErrInvalidType) + } + cs = append(cs, vs) + } + } + + return cs, nil +} + +// parseString tries to parse a key in the map claims type as a [string] type. +// If the key does not exist, an empty string is returned. If the key has the +// wrong type, an error is returned. 
+func (m MapClaims) parseString(key string) (string, error) { + var ( + ok bool + raw interface{} + iss string + ) + raw, ok = m[key] + if !ok { + return "", nil + } + + iss, ok = raw.(string) + if !ok { + return "", newError(fmt.Sprintf("%s is invalid", key), ErrInvalidType) + } + + return iss, nil +} diff --git a/vendor/github.com/golang-jwt/jwt/v5/none.go b/vendor/github.com/golang-jwt/jwt/v5/none.go new file mode 100644 index 0000000000000..685c2ea306552 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v5/none.go @@ -0,0 +1,50 @@ +package jwt + +// SigningMethodNone implements the none signing method. This is required by the spec +// but you probably should never use it. +var SigningMethodNone *signingMethodNone + +const UnsafeAllowNoneSignatureType unsafeNoneMagicConstant = "none signing method allowed" + +var NoneSignatureTypeDisallowedError error + +type signingMethodNone struct{} +type unsafeNoneMagicConstant string + +func init() { + SigningMethodNone = &signingMethodNone{} + NoneSignatureTypeDisallowedError = newError("'none' signature type is not allowed", ErrTokenUnverifiable) + + RegisterSigningMethod(SigningMethodNone.Alg(), func() SigningMethod { + return SigningMethodNone + }) +} + +func (m *signingMethodNone) Alg() string { + return "none" +} + +// Only allow 'none' alg type if UnsafeAllowNoneSignatureType is specified as the key +func (m *signingMethodNone) Verify(signingString string, sig []byte, key interface{}) (err error) { + // Key must be UnsafeAllowNoneSignatureType to prevent accidentally + // accepting 'none' signing method + if _, ok := key.(unsafeNoneMagicConstant); !ok { + return NoneSignatureTypeDisallowedError + } + // If signing method is none, signature must be an empty string + if len(sig) != 0 { + return newError("'none' signing method with non-empty signature", ErrTokenUnverifiable) + } + + // Accept 'none' signing method. + return nil +} + +// Only allow 'none' signing if UnsafeAllowNoneSignatureType is specified as the key +func (m *signingMethodNone) Sign(signingString string, key interface{}) ([]byte, error) { + if _, ok := key.(unsafeNoneMagicConstant); ok { + return []byte{}, nil + } + + return nil, NoneSignatureTypeDisallowedError +} diff --git a/vendor/github.com/golang-jwt/jwt/v5/parser.go b/vendor/github.com/golang-jwt/jwt/v5/parser.go new file mode 100644 index 0000000000000..ecf99af78f978 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v5/parser.go @@ -0,0 +1,238 @@ +package jwt + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "strings" +) + +type Parser struct { + // If populated, only these methods will be considered valid. + validMethods []string + + // Use JSON Number format in JSON decoder. + useJSONNumber bool + + // Skip claims validation during token parsing. + skipClaimsValidation bool + + validator *Validator + + decodeStrict bool + + decodePaddingAllowed bool +} + +// NewParser creates a new Parser with the specified options +func NewParser(options ...ParserOption) *Parser { + p := &Parser{ + validator: &Validator{}, + } + + // Loop through our parsing options and apply them + for _, option := range options { + option(p) + } + + return p +} + +// Parse parses, validates, verifies the signature and returns the parsed token. +// keyFunc will receive the parsed token and should return the key for validating. 
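As an aside on the vendored API above: the `Parse` entry point is driven entirely by the caller-supplied `Keyfunc`, which receives the parsed-but-unverified token and returns the verification key. A minimal, self-contained sketch of that flow is shown below; the HMAC secret and claim values are illustrative placeholders, not anything defined in this patch.

```go
package main

import (
	"fmt"

	"github.com/golang-jwt/jwt/v5"
)

func main() {
	// Placeholder secret; real keys should come from a cryptographically
	// random source, not a human-readable string.
	hmacSecret := []byte("placeholder-secret")

	// In practice the token string would arrive from a request header.
	tokenString := mintToken(hmacSecret)

	// The keyfunc returns the verification key; WithValidMethods pins the
	// accepted 'alg' values so algorithm-confusion attacks are rejected.
	token, err := jwt.Parse(tokenString, func(t *jwt.Token) (interface{}, error) {
		return hmacSecret, nil
	}, jwt.WithValidMethods([]string{"HS256"}))
	if err != nil {
		fmt.Println("parse failed:", err)
		return
	}

	if claims, ok := token.Claims.(jwt.MapClaims); ok {
		fmt.Println("sub:", claims["sub"])
	}
}

// mintToken signs a throwaway HS256 token so the example is self-contained.
func mintToken(secret []byte) string {
	t := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{"sub": "1234567890"})
	s, _ := t.SignedString(secret)
	return s
}
```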
+func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { + return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc) +} + +// ParseWithClaims parses, validates, and verifies like Parse, but supplies a default object implementing the Claims +// interface. This provides default values which can be overridden and allows a caller to use their own type, rather +// than the default MapClaims implementation of Claims. +// +// Note: If you provide a custom claim implementation that embeds one of the standard claims (such as RegisteredClaims), +// make sure that a) you either embed a non-pointer version of the claims or b) if you are using a pointer, allocate the +// proper memory for it before passing in the overall claims, otherwise you might run into a panic. +func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { + token, parts, err := p.ParseUnverified(tokenString, claims) + if err != nil { + return token, err + } + + // Verify signing method is in the required set + if p.validMethods != nil { + var signingMethodValid = false + var alg = token.Method.Alg() + for _, m := range p.validMethods { + if m == alg { + signingMethodValid = true + break + } + } + if !signingMethodValid { + // signing method is not in the listed set + return token, newError(fmt.Sprintf("signing method %v is invalid", alg), ErrTokenSignatureInvalid) + } + } + + // Decode signature + token.Signature, err = p.DecodeSegment(parts[2]) + if err != nil { + return token, newError("could not base64 decode signature", ErrTokenMalformed, err) + } + text := strings.Join(parts[0:2], ".") + + // Lookup key(s) + if keyFunc == nil { + // keyFunc was not provided. short circuiting validation + return token, newError("no keyfunc was provided", ErrTokenUnverifiable) + } + + got, err := keyFunc(token) + if err != nil { + return token, newError("error while executing keyfunc", ErrTokenUnverifiable, err) + } + + switch have := got.(type) { + case VerificationKeySet: + if len(have.Keys) == 0 { + return token, newError("keyfunc returned empty verification key set", ErrTokenUnverifiable) + } + // Iterate through keys and verify signature, skipping the rest when a match is found. + // Return the last error if no match is found. + for _, key := range have.Keys { + if err = token.Method.Verify(text, token.Signature, key); err == nil { + break + } + } + default: + err = token.Method.Verify(text, token.Signature, have) + } + if err != nil { + return token, newError("", ErrTokenSignatureInvalid, err) + } + + // Validate Claims + if !p.skipClaimsValidation { + // Make sure we have at least a default validator + if p.validator == nil { + p.validator = NewValidator() + } + + if err := p.validator.Validate(claims); err != nil { + return token, newError("", ErrTokenInvalidClaims, err) + } + } + + // No errors so far, token is valid. + token.Valid = true + + return token, nil +} + +// ParseUnverified parses the token but doesn't validate the signature. +// +// WARNING: Don't use this method unless you know what you're doing. +// +// It's only ever useful in cases where you know the signature is valid (since it has already +// been or will be checked elsewhere in the stack) and you want to extract values from it. 
+func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) { + parts = strings.Split(tokenString, ".") + if len(parts) != 3 { + return nil, parts, newError("token contains an invalid number of segments", ErrTokenMalformed) + } + + token = &Token{Raw: tokenString} + + // parse Header + var headerBytes []byte + if headerBytes, err = p.DecodeSegment(parts[0]); err != nil { + return token, parts, newError("could not base64 decode header", ErrTokenMalformed, err) + } + if err = json.Unmarshal(headerBytes, &token.Header); err != nil { + return token, parts, newError("could not JSON decode header", ErrTokenMalformed, err) + } + + // parse Claims + token.Claims = claims + + claimBytes, err := p.DecodeSegment(parts[1]) + if err != nil { + return token, parts, newError("could not base64 decode claim", ErrTokenMalformed, err) + } + + // If `useJSONNumber` is enabled then we must use *json.Decoder to decode + // the claims. However, this comes with a performance penalty so only use + // it if we must and, otherwise, simple use json.Unmarshal. + if !p.useJSONNumber { + // JSON Unmarshal. Special case for map type to avoid weird pointer behavior. + if c, ok := token.Claims.(MapClaims); ok { + err = json.Unmarshal(claimBytes, &c) + } else { + err = json.Unmarshal(claimBytes, &claims) + } + } else { + dec := json.NewDecoder(bytes.NewBuffer(claimBytes)) + dec.UseNumber() + // JSON Decode. Special case for map type to avoid weird pointer behavior. + if c, ok := token.Claims.(MapClaims); ok { + err = dec.Decode(&c) + } else { + err = dec.Decode(&claims) + } + } + if err != nil { + return token, parts, newError("could not JSON decode claim", ErrTokenMalformed, err) + } + + // Lookup signature method + if method, ok := token.Header["alg"].(string); ok { + if token.Method = GetSigningMethod(method); token.Method == nil { + return token, parts, newError("signing method (alg) is unavailable", ErrTokenUnverifiable) + } + } else { + return token, parts, newError("signing method (alg) is unspecified", ErrTokenUnverifiable) + } + + return token, parts, nil +} + +// DecodeSegment decodes a JWT specific base64url encoding. This function will +// take into account whether the [Parser] is configured with additional options, +// such as [WithStrictDecoding] or [WithPaddingAllowed]. +func (p *Parser) DecodeSegment(seg string) ([]byte, error) { + encoding := base64.RawURLEncoding + + if p.decodePaddingAllowed { + if l := len(seg) % 4; l > 0 { + seg += strings.Repeat("=", 4-l) + } + encoding = base64.URLEncoding + } + + if p.decodeStrict { + encoding = encoding.Strict() + } + return encoding.DecodeString(seg) +} + +// Parse parses, validates, verifies the signature and returns the parsed token. +// keyFunc will receive the parsed token and should return the cryptographic key +// for verifying the signature. The caller is strongly encouraged to set the +// WithValidMethods option to validate the 'alg' claim in the token matches the +// expected algorithm. For more details about the importance of validating the +// 'alg' claim, see +// https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/ +func Parse(tokenString string, keyFunc Keyfunc, options ...ParserOption) (*Token, error) { + return NewParser(options...).Parse(tokenString, keyFunc) +} + +// ParseWithClaims is a shortcut for NewParser().ParseWithClaims(). 
+// +// Note: If you provide a custom claim implementation that embeds one of the +// standard claims (such as RegisteredClaims), make sure that a) you either +// embed a non-pointer version of the claims or b) if you are using a pointer, +// allocate the proper memory for it before passing in the overall claims, +// otherwise you might run into a panic. +func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc, options ...ParserOption) (*Token, error) { + return NewParser(options...).ParseWithClaims(tokenString, claims, keyFunc) +} diff --git a/vendor/github.com/golang-jwt/jwt/v5/parser_option.go b/vendor/github.com/golang-jwt/jwt/v5/parser_option.go new file mode 100644 index 0000000000000..88a780fbd4a2e --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v5/parser_option.go @@ -0,0 +1,128 @@ +package jwt + +import "time" + +// ParserOption is used to implement functional-style options that modify the +// behavior of the parser. To add new options, just create a function (ideally +// beginning with With or Without) that returns an anonymous function that takes +// a *Parser type as input and manipulates its configuration accordingly. +type ParserOption func(*Parser) + +// WithValidMethods is an option to supply algorithm methods that the parser +// will check. Only those methods will be considered valid. It is heavily +// encouraged to use this option in order to prevent attacks such as +// https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/. +func WithValidMethods(methods []string) ParserOption { + return func(p *Parser) { + p.validMethods = methods + } +} + +// WithJSONNumber is an option to configure the underlying JSON parser with +// UseNumber. +func WithJSONNumber() ParserOption { + return func(p *Parser) { + p.useJSONNumber = true + } +} + +// WithoutClaimsValidation is an option to disable claims validation. This +// option should only be used if you exactly know what you are doing. +func WithoutClaimsValidation() ParserOption { + return func(p *Parser) { + p.skipClaimsValidation = true + } +} + +// WithLeeway returns the ParserOption for specifying the leeway window. +func WithLeeway(leeway time.Duration) ParserOption { + return func(p *Parser) { + p.validator.leeway = leeway + } +} + +// WithTimeFunc returns the ParserOption for specifying the time func. The +// primary use-case for this is testing. If you are looking for a way to account +// for clock-skew, WithLeeway should be used instead. +func WithTimeFunc(f func() time.Time) ParserOption { + return func(p *Parser) { + p.validator.timeFunc = f + } +} + +// WithIssuedAt returns the ParserOption to enable verification +// of issued-at. +func WithIssuedAt() ParserOption { + return func(p *Parser) { + p.validator.verifyIat = true + } +} + +// WithExpirationRequired returns the ParserOption to make exp claim required. +// By default exp claim is optional. +func WithExpirationRequired() ParserOption { + return func(p *Parser) { + p.validator.requireExp = true + } +} + +// WithAudience configures the validator to require the specified audience in +// the `aud` claim. Validation will fail if the audience is not listed in the +// token or the `aud` claim is missing. +// +// NOTE: While the `aud` claim is OPTIONAL in a JWT, the handling of it is +// application-specific. Since this validation API is helping developers in +// writing secure application, we decided to REQUIRE the existence of the claim, +// if an audience is expected. 
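The parser options defined in this file compose naturally. The sketch below assumes an HS256-signed token and shows one plausible combination of `WithValidMethods`, `WithIssuer`, `WithAudience`, `WithLeeway`, and `WithExpirationRequired`; the issuer, audience, and key values are invented for illustration only.

```go
package main

import (
	"fmt"
	"time"

	"github.com/golang-jwt/jwt/v5"
)

func main() {
	key := []byte("placeholder-secret") // illustrative only

	// Mint a token that satisfies the checks configured below.
	claims := jwt.MapClaims{
		"iss": "https://issuer.example.com",
		"aud": "my-api",
		"exp": jwt.NewNumericDate(time.Now().Add(time.Hour)),
	}
	signed, err := jwt.NewWithClaims(jwt.SigningMethodHS256, claims).SignedString(key)
	if err != nil {
		panic(err)
	}

	// Require HS256, a specific issuer and audience, a little clock-skew
	// leeway, and a mandatory exp claim.
	parser := jwt.NewParser(
		jwt.WithValidMethods([]string{"HS256"}),
		jwt.WithIssuer("https://issuer.example.com"),
		jwt.WithAudience("my-api"),
		jwt.WithLeeway(30*time.Second),
		jwt.WithExpirationRequired(),
	)

	token, err := parser.Parse(signed, func(t *jwt.Token) (interface{}, error) {
		return key, nil
	})
	if err != nil {
		fmt.Println("rejected:", err)
		return
	}
	fmt.Println("accepted, valid =", token.Valid)
}
```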
+func WithAudience(aud string) ParserOption { + return func(p *Parser) { + p.validator.expectedAud = aud + } +} + +// WithIssuer configures the validator to require the specified issuer in the +// `iss` claim. Validation will fail if a different issuer is specified in the +// token or the `iss` claim is missing. +// +// NOTE: While the `iss` claim is OPTIONAL in a JWT, the handling of it is +// application-specific. Since this validation API is helping developers in +// writing secure application, we decided to REQUIRE the existence of the claim, +// if an issuer is expected. +func WithIssuer(iss string) ParserOption { + return func(p *Parser) { + p.validator.expectedIss = iss + } +} + +// WithSubject configures the validator to require the specified subject in the +// `sub` claim. Validation will fail if a different subject is specified in the +// token or the `sub` claim is missing. +// +// NOTE: While the `sub` claim is OPTIONAL in a JWT, the handling of it is +// application-specific. Since this validation API is helping developers in +// writing secure application, we decided to REQUIRE the existence of the claim, +// if a subject is expected. +func WithSubject(sub string) ParserOption { + return func(p *Parser) { + p.validator.expectedSub = sub + } +} + +// WithPaddingAllowed will enable the codec used for decoding JWTs to allow +// padding. Note that the JWS RFC7515 states that the tokens will utilize a +// Base64url encoding with no padding. Unfortunately, some implementations of +// JWT are producing non-standard tokens, and thus require support for decoding. +func WithPaddingAllowed() ParserOption { + return func(p *Parser) { + p.decodePaddingAllowed = true + } +} + +// WithStrictDecoding will switch the codec used for decoding JWTs into strict +// mode. In this mode, the decoder requires that trailing padding bits are zero, +// as described in RFC 4648 section 3.5. +func WithStrictDecoding() ParserOption { + return func(p *Parser) { + p.decodeStrict = true + } +} diff --git a/vendor/github.com/golang-jwt/jwt/v5/registered_claims.go b/vendor/github.com/golang-jwt/jwt/v5/registered_claims.go new file mode 100644 index 0000000000000..77951a531d91f --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v5/registered_claims.go @@ -0,0 +1,63 @@ +package jwt + +// RegisteredClaims are a structured version of the JWT Claims Set, +// restricted to Registered Claim Names, as referenced at +// https://datatracker.ietf.org/doc/html/rfc7519#section-4.1 +// +// This type can be used on its own, but then additional private and +// public claims embedded in the JWT will not be parsed. The typical use-case +// therefore is to embedded this in a user-defined claim type. +// +// See examples for how to use this with your own claim types. +type RegisteredClaims struct { + // the `iss` (Issuer) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.1 + Issuer string `json:"iss,omitempty"` + + // the `sub` (Subject) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.2 + Subject string `json:"sub,omitempty"` + + // the `aud` (Audience) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.3 + Audience ClaimStrings `json:"aud,omitempty"` + + // the `exp` (Expiration Time) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.4 + ExpiresAt *NumericDate `json:"exp,omitempty"` + + // the `nbf` (Not Before) claim. 
See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.5 + NotBefore *NumericDate `json:"nbf,omitempty"` + + // the `iat` (Issued At) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.6 + IssuedAt *NumericDate `json:"iat,omitempty"` + + // the `jti` (JWT ID) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.7 + ID string `json:"jti,omitempty"` +} + +// GetExpirationTime implements the Claims interface. +func (c RegisteredClaims) GetExpirationTime() (*NumericDate, error) { + return c.ExpiresAt, nil +} + +// GetNotBefore implements the Claims interface. +func (c RegisteredClaims) GetNotBefore() (*NumericDate, error) { + return c.NotBefore, nil +} + +// GetIssuedAt implements the Claims interface. +func (c RegisteredClaims) GetIssuedAt() (*NumericDate, error) { + return c.IssuedAt, nil +} + +// GetAudience implements the Claims interface. +func (c RegisteredClaims) GetAudience() (ClaimStrings, error) { + return c.Audience, nil +} + +// GetIssuer implements the Claims interface. +func (c RegisteredClaims) GetIssuer() (string, error) { + return c.Issuer, nil +} + +// GetSubject implements the Claims interface. +func (c RegisteredClaims) GetSubject() (string, error) { + return c.Subject, nil +} diff --git a/vendor/github.com/golang-jwt/jwt/v5/rsa.go b/vendor/github.com/golang-jwt/jwt/v5/rsa.go new file mode 100644 index 0000000000000..83cbee6ae2bab --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v5/rsa.go @@ -0,0 +1,93 @@ +package jwt + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" +) + +// SigningMethodRSA implements the RSA family of signing methods. +// Expects *rsa.PrivateKey for signing and *rsa.PublicKey for validation +type SigningMethodRSA struct { + Name string + Hash crypto.Hash +} + +// Specific instances for RS256 and company +var ( + SigningMethodRS256 *SigningMethodRSA + SigningMethodRS384 *SigningMethodRSA + SigningMethodRS512 *SigningMethodRSA +) + +func init() { + // RS256 + SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256} + RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod { + return SigningMethodRS256 + }) + + // RS384 + SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384} + RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod { + return SigningMethodRS384 + }) + + // RS512 + SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512} + RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod { + return SigningMethodRS512 + }) +} + +func (m *SigningMethodRSA) Alg() string { + return m.Name +} + +// Verify implements token verification for the SigningMethod +// For this signing method, must be an *rsa.PublicKey structure. +func (m *SigningMethodRSA) Verify(signingString string, sig []byte, key interface{}) error { + var rsaKey *rsa.PublicKey + var ok bool + + if rsaKey, ok = key.(*rsa.PublicKey); !ok { + return newError("RSA verify expects *rsa.PublicKey", ErrInvalidKeyType) + } + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Verify the signature + return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig) +} + +// Sign implements token signing for the SigningMethod +// For this signing method, must be an *rsa.PrivateKey structure. 
+func (m *SigningMethodRSA) Sign(signingString string, key interface{}) ([]byte, error) { + var rsaKey *rsa.PrivateKey + var ok bool + + // Validate type of key + if rsaKey, ok = key.(*rsa.PrivateKey); !ok { + return nil, newError("RSA sign expects *rsa.PrivateKey", ErrInvalidKeyType) + } + + // Create the hasher + if !m.Hash.Available() { + return nil, ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return the encoded bytes + if sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil)); err == nil { + return sigBytes, nil + } else { + return nil, err + } +} diff --git a/vendor/github.com/golang-jwt/jwt/v5/rsa_pss.go b/vendor/github.com/golang-jwt/jwt/v5/rsa_pss.go new file mode 100644 index 0000000000000..28c386ec43aa9 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v5/rsa_pss.go @@ -0,0 +1,135 @@ +//go:build go1.4 +// +build go1.4 + +package jwt + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" +) + +// SigningMethodRSAPSS implements the RSAPSS family of signing methods signing methods +type SigningMethodRSAPSS struct { + *SigningMethodRSA + Options *rsa.PSSOptions + // VerifyOptions is optional. If set overrides Options for rsa.VerifyPPS. + // Used to accept tokens signed with rsa.PSSSaltLengthAuto, what doesn't follow + // https://tools.ietf.org/html/rfc7518#section-3.5 but was used previously. + // See https://github.com/dgrijalva/jwt-go/issues/285#issuecomment-437451244 for details. + VerifyOptions *rsa.PSSOptions +} + +// Specific instances for RS/PS and company. +var ( + SigningMethodPS256 *SigningMethodRSAPSS + SigningMethodPS384 *SigningMethodRSAPSS + SigningMethodPS512 *SigningMethodRSAPSS +) + +func init() { + // PS256 + SigningMethodPS256 = &SigningMethodRSAPSS{ + SigningMethodRSA: &SigningMethodRSA{ + Name: "PS256", + Hash: crypto.SHA256, + }, + Options: &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthEqualsHash, + }, + VerifyOptions: &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + }, + } + RegisterSigningMethod(SigningMethodPS256.Alg(), func() SigningMethod { + return SigningMethodPS256 + }) + + // PS384 + SigningMethodPS384 = &SigningMethodRSAPSS{ + SigningMethodRSA: &SigningMethodRSA{ + Name: "PS384", + Hash: crypto.SHA384, + }, + Options: &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthEqualsHash, + }, + VerifyOptions: &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + }, + } + RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod { + return SigningMethodPS384 + }) + + // PS512 + SigningMethodPS512 = &SigningMethodRSAPSS{ + SigningMethodRSA: &SigningMethodRSA{ + Name: "PS512", + Hash: crypto.SHA512, + }, + Options: &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthEqualsHash, + }, + VerifyOptions: &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + }, + } + RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod { + return SigningMethodPS512 + }) +} + +// Verify implements token verification for the SigningMethod. 
+// For this verify method, key must be an rsa.PublicKey struct +func (m *SigningMethodRSAPSS) Verify(signingString string, sig []byte, key interface{}) error { + var rsaKey *rsa.PublicKey + switch k := key.(type) { + case *rsa.PublicKey: + rsaKey = k + default: + return newError("RSA-PSS verify expects *rsa.PublicKey", ErrInvalidKeyType) + } + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + opts := m.Options + if m.VerifyOptions != nil { + opts = m.VerifyOptions + } + + return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, opts) +} + +// Sign implements token signing for the SigningMethod. +// For this signing method, key must be an rsa.PrivateKey struct +func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) ([]byte, error) { + var rsaKey *rsa.PrivateKey + + switch k := key.(type) { + case *rsa.PrivateKey: + rsaKey = k + default: + return nil, newError("RSA-PSS sign expects *rsa.PrivateKey", ErrInvalidKeyType) + } + + // Create the hasher + if !m.Hash.Available() { + return nil, ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return the encoded bytes + if sigBytes, err := rsa.SignPSS(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil), m.Options); err == nil { + return sigBytes, nil + } else { + return nil, err + } +} diff --git a/vendor/github.com/golang-jwt/jwt/v5/rsa_utils.go b/vendor/github.com/golang-jwt/jwt/v5/rsa_utils.go new file mode 100644 index 0000000000000..b3aeebbe110b1 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v5/rsa_utils.go @@ -0,0 +1,107 @@ +package jwt + +import ( + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "errors" +) + +var ( + ErrKeyMustBePEMEncoded = errors.New("invalid key: Key must be a PEM encoded PKCS1 or PKCS8 key") + ErrNotRSAPrivateKey = errors.New("key is not a valid RSA private key") + ErrNotRSAPublicKey = errors.New("key is not a valid RSA public key") +) + +// ParseRSAPrivateKeyFromPEM parses a PEM encoded PKCS1 or PKCS8 private key +func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + var parsedKey interface{} + if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil { + if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { + return nil, err + } + } + + var pkey *rsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok { + return nil, ErrNotRSAPrivateKey + } + + return pkey, nil +} + +// ParseRSAPrivateKeyFromPEMWithPassword parses a PEM encoded PKCS1 or PKCS8 private key protected with password +// +// Deprecated: This function is deprecated and should not be used anymore. It uses the deprecated x509.DecryptPEMBlock +// function, which was deprecated since RFC 1423 is regarded insecure by design. Unfortunately, there is no alternative +// in the Go standard library for now. See https://github.com/golang/go/issues/8860. 
+func ParseRSAPrivateKeyFromPEMWithPassword(key []byte, password string) (*rsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + var parsedKey interface{} + + var blockDecrypted []byte + if blockDecrypted, err = x509.DecryptPEMBlock(block, []byte(password)); err != nil { + return nil, err + } + + if parsedKey, err = x509.ParsePKCS1PrivateKey(blockDecrypted); err != nil { + if parsedKey, err = x509.ParsePKCS8PrivateKey(blockDecrypted); err != nil { + return nil, err + } + } + + var pkey *rsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok { + return nil, ErrNotRSAPrivateKey + } + + return pkey, nil +} + +// ParseRSAPublicKeyFromPEM parses a certificate or a PEM encoded PKCS1 or PKIX public key +func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { + if cert, err := x509.ParseCertificate(block.Bytes); err == nil { + parsedKey = cert.PublicKey + } else { + if parsedKey, err = x509.ParsePKCS1PublicKey(block.Bytes); err != nil { + return nil, err + } + } + } + + var pkey *rsa.PublicKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PublicKey); !ok { + return nil, ErrNotRSAPublicKey + } + + return pkey, nil +} diff --git a/vendor/github.com/golang-jwt/jwt/v5/signing_method.go b/vendor/github.com/golang-jwt/jwt/v5/signing_method.go new file mode 100644 index 0000000000000..0d73631c1bf36 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v5/signing_method.go @@ -0,0 +1,49 @@ +package jwt + +import ( + "sync" +) + +var signingMethods = map[string]func() SigningMethod{} +var signingMethodLock = new(sync.RWMutex) + +// SigningMethod can be used add new methods for signing or verifying tokens. It +// takes a decoded signature as an input in the Verify function and produces a +// signature in Sign. The signature is then usually base64 encoded as part of a +// JWT. +type SigningMethod interface { + Verify(signingString string, sig []byte, key interface{}) error // Returns nil if signature is valid + Sign(signingString string, key interface{}) ([]byte, error) // Returns signature or error + Alg() string // returns the alg identifier for this method (example: 'HS256') +} + +// RegisterSigningMethod registers the "alg" name and a factory function for signing method. 
+// This is typically done during init() in the method's implementation +func RegisterSigningMethod(alg string, f func() SigningMethod) { + signingMethodLock.Lock() + defer signingMethodLock.Unlock() + + signingMethods[alg] = f +} + +// GetSigningMethod retrieves a signing method from an "alg" string +func GetSigningMethod(alg string) (method SigningMethod) { + signingMethodLock.RLock() + defer signingMethodLock.RUnlock() + + if methodF, ok := signingMethods[alg]; ok { + method = methodF() + } + return +} + +// GetAlgorithms returns a list of registered "alg" names +func GetAlgorithms() (algs []string) { + signingMethodLock.RLock() + defer signingMethodLock.RUnlock() + + for alg := range signingMethods { + algs = append(algs, alg) + } + return +} diff --git a/vendor/github.com/golang-jwt/jwt/v5/staticcheck.conf b/vendor/github.com/golang-jwt/jwt/v5/staticcheck.conf new file mode 100644 index 0000000000000..53745d51d7c74 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v5/staticcheck.conf @@ -0,0 +1 @@ +checks = ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1023"] diff --git a/vendor/github.com/golang-jwt/jwt/v5/token.go b/vendor/github.com/golang-jwt/jwt/v5/token.go new file mode 100644 index 0000000000000..352873a2d9c26 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v5/token.go @@ -0,0 +1,100 @@ +package jwt + +import ( + "crypto" + "encoding/base64" + "encoding/json" +) + +// Keyfunc will be used by the Parse methods as a callback function to supply +// the key for verification. The function receives the parsed, but unverified +// Token. This allows you to use properties in the Header of the token (such as +// `kid`) to identify which key to use. +// +// The returned interface{} may be a single key or a VerificationKeySet containing +// multiple keys. +type Keyfunc func(*Token) (interface{}, error) + +// VerificationKey represents a public or secret key for verifying a token's signature. +type VerificationKey interface { + crypto.PublicKey | []uint8 +} + +// VerificationKeySet is a set of public or secret keys. It is used by the parser to verify a token. +type VerificationKeySet struct { + Keys []VerificationKey +} + +// Token represents a JWT Token. Different fields will be used depending on +// whether you're creating or parsing/verifying a token. +type Token struct { + Raw string // Raw contains the raw token. Populated when you [Parse] a token + Method SigningMethod // Method is the signing method used or to be used + Header map[string]interface{} // Header is the first segment of the token in decoded form + Claims Claims // Claims is the second segment of the token in decoded form + Signature []byte // Signature is the third segment of the token in decoded form. Populated when you Parse a token + Valid bool // Valid specifies if the token is valid. Populated when you Parse/Verify a token +} + +// New creates a new [Token] with the specified signing method and an empty map +// of claims. Additional options can be specified, but are currently unused. +func New(method SigningMethod, opts ...TokenOption) *Token { + return NewWithClaims(method, MapClaims{}, opts...) +} + +// NewWithClaims creates a new [Token] with the specified signing method and +// claims. Additional options can be specified, but are currently unused. 
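To illustrate the token-creation side documented here, the following sketch builds a token with `NewWithClaims`, signs it with `SignedString`, and reads it back with `ParseWithClaims` into a custom claims type (note the pointer, as the parser documentation above requires). The `AppClaims` struct and all literal values are hypothetical.

```go
package main

import (
	"fmt"
	"time"

	"github.com/golang-jwt/jwt/v5"
)

// AppClaims embeds RegisteredClaims and adds one private claim. The field
// names are illustrative, not part of the library.
type AppClaims struct {
	Role string `json:"role"`
	jwt.RegisteredClaims
}

func main() {
	key := []byte("placeholder-secret") // use a random key in real code

	claims := AppClaims{
		Role: "admin",
		RegisteredClaims: jwt.RegisteredClaims{
			Subject:   "user-42",
			Issuer:    "https://issuer.example.com",
			IssuedAt:  jwt.NewNumericDate(time.Now()),
			ExpiresAt: jwt.NewNumericDate(time.Now().Add(time.Hour)),
		},
	}

	signed, err := jwt.NewWithClaims(jwt.SigningMethodHS256, claims).SignedString(key)
	if err != nil {
		panic(err)
	}

	// Parse back into the same custom type.
	parsed, err := jwt.ParseWithClaims(signed, &AppClaims{}, func(t *jwt.Token) (interface{}, error) {
		return key, nil
	}, jwt.WithValidMethods([]string{"HS256"}))
	if err != nil {
		panic(err)
	}

	got := parsed.Claims.(*AppClaims)
	fmt.Println(got.Subject, got.Role)
}
```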
+func NewWithClaims(method SigningMethod, claims Claims, opts ...TokenOption) *Token { + return &Token{ + Header: map[string]interface{}{ + "typ": "JWT", + "alg": method.Alg(), + }, + Claims: claims, + Method: method, + } +} + +// SignedString creates and returns a complete, signed JWT. The token is signed +// using the SigningMethod specified in the token. Please refer to +// https://golang-jwt.github.io/jwt/usage/signing_methods/#signing-methods-and-key-types +// for an overview of the different signing methods and their respective key +// types. +func (t *Token) SignedString(key interface{}) (string, error) { + sstr, err := t.SigningString() + if err != nil { + return "", err + } + + sig, err := t.Method.Sign(sstr, key) + if err != nil { + return "", err + } + + return sstr + "." + t.EncodeSegment(sig), nil +} + +// SigningString generates the signing string. This is the most expensive part +// of the whole deal. Unless you need this for something special, just go +// straight for the SignedString. +func (t *Token) SigningString() (string, error) { + h, err := json.Marshal(t.Header) + if err != nil { + return "", err + } + + c, err := json.Marshal(t.Claims) + if err != nil { + return "", err + } + + return t.EncodeSegment(h) + "." + t.EncodeSegment(c), nil +} + +// EncodeSegment encodes a JWT specific base64url encoding with padding +// stripped. In the future, this function might take into account a +// [TokenOption]. Therefore, this function exists as a method of [Token], rather +// than a global function. +func (*Token) EncodeSegment(seg []byte) string { + return base64.RawURLEncoding.EncodeToString(seg) +} diff --git a/vendor/github.com/golang-jwt/jwt/v5/token_option.go b/vendor/github.com/golang-jwt/jwt/v5/token_option.go new file mode 100644 index 0000000000000..b4ae3badf8e48 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v5/token_option.go @@ -0,0 +1,5 @@ +package jwt + +// TokenOption is a reserved type, which provides some forward compatibility, +// if we ever want to introduce token creation-related options. +type TokenOption func(*Token) diff --git a/vendor/github.com/golang-jwt/jwt/v5/types.go b/vendor/github.com/golang-jwt/jwt/v5/types.go new file mode 100644 index 0000000000000..b2655a9e6d26d --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v5/types.go @@ -0,0 +1,149 @@ +package jwt + +import ( + "encoding/json" + "fmt" + "math" + "strconv" + "time" +) + +// TimePrecision sets the precision of times and dates within this library. This +// has an influence on the precision of times when comparing expiry or other +// related time fields. Furthermore, it is also the precision of times when +// serializing. +// +// For backwards compatibility the default precision is set to seconds, so that +// no fractional timestamps are generated. +var TimePrecision = time.Second + +// MarshalSingleStringAsArray modifies the behavior of the ClaimStrings type, +// especially its MarshalJSON function. +// +// If it is set to true (the default), it will always serialize the type as an +// array of strings, even if it just contains one element, defaulting to the +// behavior of the underlying []string. If it is set to false, it will serialize +// to a single string, if it contains one element. Otherwise, it will serialize +// to an array of strings. +var MarshalSingleStringAsArray = true + +// NumericDate represents a JSON numeric date value, as referenced at +// https://datatracker.ietf.org/doc/html/rfc7519#section-2. 
+type NumericDate struct { + time.Time +} + +// NewNumericDate constructs a new *NumericDate from a standard library time.Time struct. +// It will truncate the timestamp according to the precision specified in TimePrecision. +func NewNumericDate(t time.Time) *NumericDate { + return &NumericDate{t.Truncate(TimePrecision)} +} + +// newNumericDateFromSeconds creates a new *NumericDate out of a float64 representing a +// UNIX epoch with the float fraction representing non-integer seconds. +func newNumericDateFromSeconds(f float64) *NumericDate { + round, frac := math.Modf(f) + return NewNumericDate(time.Unix(int64(round), int64(frac*1e9))) +} + +// MarshalJSON is an implementation of the json.RawMessage interface and serializes the UNIX epoch +// represented in NumericDate to a byte array, using the precision specified in TimePrecision. +func (date NumericDate) MarshalJSON() (b []byte, err error) { + var prec int + if TimePrecision < time.Second { + prec = int(math.Log10(float64(time.Second) / float64(TimePrecision))) + } + truncatedDate := date.Truncate(TimePrecision) + + // For very large timestamps, UnixNano would overflow an int64, but this + // function requires nanosecond level precision, so we have to use the + // following technique to get round the issue: + // + // 1. Take the normal unix timestamp to form the whole number part of the + // output, + // 2. Take the result of the Nanosecond function, which returns the offset + // within the second of the particular unix time instance, to form the + // decimal part of the output + // 3. Concatenate them to produce the final result + seconds := strconv.FormatInt(truncatedDate.Unix(), 10) + nanosecondsOffset := strconv.FormatFloat(float64(truncatedDate.Nanosecond())/float64(time.Second), 'f', prec, 64) + + output := append([]byte(seconds), []byte(nanosecondsOffset)[1:]...) + + return output, nil +} + +// UnmarshalJSON is an implementation of the json.RawMessage interface and +// deserializes a [NumericDate] from a JSON representation, i.e. a +// [json.Number]. This number represents an UNIX epoch with either integer or +// non-integer seconds. +func (date *NumericDate) UnmarshalJSON(b []byte) (err error) { + var ( + number json.Number + f float64 + ) + + if err = json.Unmarshal(b, &number); err != nil { + return fmt.Errorf("could not parse NumericData: %w", err) + } + + if f, err = number.Float64(); err != nil { + return fmt.Errorf("could not convert json number value to float: %w", err) + } + + n := newNumericDateFromSeconds(f) + *date = *n + + return nil +} + +// ClaimStrings is basically just a slice of strings, but it can be either +// serialized from a string array or just a string. This type is necessary, +// since the "aud" claim can either be a single string or an array. +type ClaimStrings []string + +func (s *ClaimStrings) UnmarshalJSON(data []byte) (err error) { + var value interface{} + + if err = json.Unmarshal(data, &value); err != nil { + return err + } + + var aud []string + + switch v := value.(type) { + case string: + aud = append(aud, v) + case []string: + aud = ClaimStrings(v) + case []interface{}: + for _, vv := range v { + vs, ok := vv.(string) + if !ok { + return ErrInvalidType + } + aud = append(aud, vs) + } + case nil: + return nil + default: + return ErrInvalidType + } + + *s = aud + + return +} + +func (s ClaimStrings) MarshalJSON() (b []byte, err error) { + // This handles a special case in the JWT RFC. If the string array, e.g. 
+ // used by the "aud" field, only contains one element, it MAY be serialized + // as a single string. This may or may not be desired based on the ecosystem + // of other JWT library used, so we make it configurable by the variable + // MarshalSingleStringAsArray. + if len(s) == 1 && !MarshalSingleStringAsArray { + return json.Marshal(s[0]) + } + + return json.Marshal([]string(s)) +} diff --git a/vendor/github.com/golang-jwt/jwt/v5/validator.go b/vendor/github.com/golang-jwt/jwt/v5/validator.go new file mode 100644 index 0000000000000..008ecd8712ec5 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v5/validator.go @@ -0,0 +1,316 @@ +package jwt + +import ( + "crypto/subtle" + "fmt" + "time" +) + +// ClaimsValidator is an interface that can be implemented by custom claims who +// wish to execute any additional claims validation based on +// application-specific logic. The Validate function is then executed in +// addition to the regular claims validation and any error returned is appended +// to the final validation result. +// +// type MyCustomClaims struct { +// Foo string `json:"foo"` +// jwt.RegisteredClaims +// } +// +// func (m MyCustomClaims) Validate() error { +// if m.Foo != "bar" { +// return errors.New("must be foobar") +// } +// return nil +// } +type ClaimsValidator interface { + Claims + Validate() error +} + +// Validator is the core of the new Validation API. It is automatically used by +// a [Parser] during parsing and can be modified with various parser options. +// +// The [NewValidator] function should be used to create an instance of this +// struct. +type Validator struct { + // leeway is an optional leeway that can be provided to account for clock skew. + leeway time.Duration + + // timeFunc is used to supply the current time that is needed for + // validation. If unspecified, this defaults to time.Now. + timeFunc func() time.Time + + // requireExp specifies whether the exp claim is required + requireExp bool + + // verifyIat specifies whether the iat (Issued At) claim will be verified. + // According to https://www.rfc-editor.org/rfc/rfc7519#section-4.1.6 this + // only specifies the age of the token, but no validation check is + // necessary. However, if wanted, it can be checked if the iat is + // unrealistic, i.e., in the future. + verifyIat bool + + // expectedAud contains the audience this token expects. Supplying an empty + // string will disable aud checking. + expectedAud string + + // expectedIss contains the issuer this token expects. Supplying an empty + // string will disable iss checking. + expectedIss string + + // expectedSub contains the subject this token expects. Supplying an empty + // string will disable sub checking. + expectedSub string +} + +// NewValidator can be used to create a stand-alone validator with the supplied +// options. This validator can then be used to validate already parsed claims. +// +// Note: Under normal circumstances, explicitly creating a validator is not +// needed and can potentially be dangerous; instead functions of the [Parser] +// class should be used. +// +// The [Validator] is only checking the *validity* of the claims, such as its +// expiration time, but it does NOT perform *signature verification* of the +// token. +func NewValidator(opts ...ParserOption) *Validator { + p := NewParser(opts...) + return p.validator +} + +// Validate validates the given claims. It will also perform any custom +// validation if claims implements the [ClaimsValidator] interface. 
+// +// Note: It will NOT perform any *signature verification* on the token that +// contains the claims and expects that the [Claim] was already successfully +// verified. +func (v *Validator) Validate(claims Claims) error { + var ( + now time.Time + errs []error = make([]error, 0, 6) + err error + ) + + // Check, if we have a time func + if v.timeFunc != nil { + now = v.timeFunc() + } else { + now = time.Now() + } + + // We always need to check the expiration time, but usage of the claim + // itself is OPTIONAL by default. requireExp overrides this behavior + // and makes the exp claim mandatory. + if err = v.verifyExpiresAt(claims, now, v.requireExp); err != nil { + errs = append(errs, err) + } + + // We always need to check not-before, but usage of the claim itself is + // OPTIONAL. + if err = v.verifyNotBefore(claims, now, false); err != nil { + errs = append(errs, err) + } + + // Check issued-at if the option is enabled + if v.verifyIat { + if err = v.verifyIssuedAt(claims, now, false); err != nil { + errs = append(errs, err) + } + } + + // If we have an expected audience, we also require the audience claim + if v.expectedAud != "" { + if err = v.verifyAudience(claims, v.expectedAud, true); err != nil { + errs = append(errs, err) + } + } + + // If we have an expected issuer, we also require the issuer claim + if v.expectedIss != "" { + if err = v.verifyIssuer(claims, v.expectedIss, true); err != nil { + errs = append(errs, err) + } + } + + // If we have an expected subject, we also require the subject claim + if v.expectedSub != "" { + if err = v.verifySubject(claims, v.expectedSub, true); err != nil { + errs = append(errs, err) + } + } + + // Finally, we want to give the claim itself some possibility to do some + // additional custom validation based on a custom Validate function. + cvt, ok := claims.(ClaimsValidator) + if ok { + if err := cvt.Validate(); err != nil { + errs = append(errs, err) + } + } + + if len(errs) == 0 { + return nil + } + + return joinErrors(errs...) +} + +// verifyExpiresAt compares the exp claim in claims against cmp. This function +// will succeed if cmp < exp. Additional leeway is taken into account. +// +// If exp is not set, it will succeed if the claim is not required, +// otherwise ErrTokenRequiredClaimMissing will be returned. +// +// Additionally, if any error occurs while retrieving the claim, e.g., when its +// the wrong type, an ErrTokenUnverifiable error will be returned. +func (v *Validator) verifyExpiresAt(claims Claims, cmp time.Time, required bool) error { + exp, err := claims.GetExpirationTime() + if err != nil { + return err + } + + if exp == nil { + return errorIfRequired(required, "exp") + } + + return errorIfFalse(cmp.Before((exp.Time).Add(+v.leeway)), ErrTokenExpired) +} + +// verifyIssuedAt compares the iat claim in claims against cmp. This function +// will succeed if cmp >= iat. Additional leeway is taken into account. +// +// If iat is not set, it will succeed if the claim is not required, +// otherwise ErrTokenRequiredClaimMissing will be returned. +// +// Additionally, if any error occurs while retrieving the claim, e.g., when its +// the wrong type, an ErrTokenUnverifiable error will be returned. 
+func (v *Validator) verifyIssuedAt(claims Claims, cmp time.Time, required bool) error { + iat, err := claims.GetIssuedAt() + if err != nil { + return err + } + + if iat == nil { + return errorIfRequired(required, "iat") + } + + return errorIfFalse(!cmp.Before(iat.Add(-v.leeway)), ErrTokenUsedBeforeIssued) +} + +// verifyNotBefore compares the nbf claim in claims against cmp. This function +// will return true if cmp >= nbf. Additional leeway is taken into account. +// +// If nbf is not set, it will succeed if the claim is not required, +// otherwise ErrTokenRequiredClaimMissing will be returned. +// +// Additionally, if any error occurs while retrieving the claim, e.g., when its +// the wrong type, an ErrTokenUnverifiable error will be returned. +func (v *Validator) verifyNotBefore(claims Claims, cmp time.Time, required bool) error { + nbf, err := claims.GetNotBefore() + if err != nil { + return err + } + + if nbf == nil { + return errorIfRequired(required, "nbf") + } + + return errorIfFalse(!cmp.Before(nbf.Add(-v.leeway)), ErrTokenNotValidYet) +} + +// verifyAudience compares the aud claim against cmp. +// +// If aud is not set or an empty list, it will succeed if the claim is not required, +// otherwise ErrTokenRequiredClaimMissing will be returned. +// +// Additionally, if any error occurs while retrieving the claim, e.g., when its +// the wrong type, an ErrTokenUnverifiable error will be returned. +func (v *Validator) verifyAudience(claims Claims, cmp string, required bool) error { + aud, err := claims.GetAudience() + if err != nil { + return err + } + + if len(aud) == 0 { + return errorIfRequired(required, "aud") + } + + // use a var here to keep constant time compare when looping over a number of claims + result := false + + var stringClaims string + for _, a := range aud { + if subtle.ConstantTimeCompare([]byte(a), []byte(cmp)) != 0 { + result = true + } + stringClaims = stringClaims + a + } + + // case where "" is sent in one or many aud claims + if stringClaims == "" { + return errorIfRequired(required, "aud") + } + + return errorIfFalse(result, ErrTokenInvalidAudience) +} + +// verifyIssuer compares the iss claim in claims against cmp. +// +// If iss is not set, it will succeed if the claim is not required, +// otherwise ErrTokenRequiredClaimMissing will be returned. +// +// Additionally, if any error occurs while retrieving the claim, e.g., when its +// the wrong type, an ErrTokenUnverifiable error will be returned. +func (v *Validator) verifyIssuer(claims Claims, cmp string, required bool) error { + iss, err := claims.GetIssuer() + if err != nil { + return err + } + + if iss == "" { + return errorIfRequired(required, "iss") + } + + return errorIfFalse(iss == cmp, ErrTokenInvalidIssuer) +} + +// verifySubject compares the sub claim against cmp. +// +// If sub is not set, it will succeed if the claim is not required, +// otherwise ErrTokenRequiredClaimMissing will be returned. +// +// Additionally, if any error occurs while retrieving the claim, e.g., when its +// the wrong type, an ErrTokenUnverifiable error will be returned. +func (v *Validator) verifySubject(claims Claims, cmp string, required bool) error { + sub, err := claims.GetSubject() + if err != nil { + return err + } + + if sub == "" { + return errorIfRequired(required, "sub") + } + + return errorIfFalse(sub == cmp, ErrTokenInvalidSubject) +} + +// errorIfFalse returns the error specified in err, if the value is true. +// Otherwise, nil is returned. 
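A minimal sketch of driving the validator above directly (illustrative only; in normal use the Parser constructs and calls it, and the claim values here are invented):

package main

import (
	"fmt"
	"time"

	"github.com/golang-jwt/jwt/v5"
)

func main() {
	// Numeric date claims are supplied as float64 seconds, matching what
	// JSON decoding of a token would produce.
	claims := jwt.MapClaims{
		"iss": "example-issuer",
		"exp": float64(time.Now().Add(time.Hour).Unix()),
	}

	// A validator built without options only checks exp/nbf when present;
	// issuer, audience, and subject checks stay disabled.
	v := jwt.NewValidator()
	if err := v.Validate(claims); err != nil {
		fmt.Println("claims rejected:", err)
		return
	}
	fmt.Println("claims accepted")
}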
+func errorIfFalse(value bool, err error) error { + if value { + return nil + } else { + return err + } +} + +// errorIfRequired returns an ErrTokenRequiredClaimMissing error if required is +// true. Otherwise, nil is returned. +func errorIfRequired(required bool, claim string) error { + if required { + return newError(fmt.Sprintf("%s claim is required", claim), ErrTokenRequiredClaimMissing) + } else { + return nil + } +} diff --git a/vendor/github.com/google/go-cmp/cmp/compare.go b/vendor/github.com/google/go-cmp/cmp/compare.go index 087320da7f0f5..0f5b8a48c6b97 100644 --- a/vendor/github.com/google/go-cmp/cmp/compare.go +++ b/vendor/github.com/google/go-cmp/cmp/compare.go @@ -5,7 +5,7 @@ // Package cmp determines equality of values. // // This package is intended to be a more powerful and safer alternative to -// reflect.DeepEqual for comparing whether two values are semantically equal. +// [reflect.DeepEqual] for comparing whether two values are semantically equal. // It is intended to only be used in tests, as performance is not a goal and // it may panic if it cannot compare the values. Its propensity towards // panicking means that its unsuitable for production environments where a @@ -18,16 +18,17 @@ // For example, an equality function may report floats as equal so long as // they are within some tolerance of each other. // -// - Types with an Equal method may use that method to determine equality. -// This allows package authors to determine the equality operation -// for the types that they define. +// - Types with an Equal method (e.g., [time.Time.Equal]) may use that method +// to determine equality. This allows package authors to determine +// the equality operation for the types that they define. // // - If no custom equality functions are used and no Equal method is defined, // equality is determined by recursively comparing the primitive kinds on -// both values, much like reflect.DeepEqual. Unlike reflect.DeepEqual, +// both values, much like [reflect.DeepEqual]. Unlike [reflect.DeepEqual], // unexported fields are not compared by default; they result in panics -// unless suppressed by using an Ignore option (see cmpopts.IgnoreUnexported) -// or explicitly compared using the Exporter option. +// unless suppressed by using an [Ignore] option +// (see [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported]) +// or explicitly compared using the [Exporter] option. package cmp import ( @@ -45,14 +46,14 @@ import ( // Equal reports whether x and y are equal by recursively applying the // following rules in the given order to x and y and all of their sub-values: // -// - Let S be the set of all Ignore, Transformer, and Comparer options that +// - Let S be the set of all [Ignore], [Transformer], and [Comparer] options that // remain after applying all path filters, value filters, and type filters. -// If at least one Ignore exists in S, then the comparison is ignored. -// If the number of Transformer and Comparer options in S is non-zero, +// If at least one [Ignore] exists in S, then the comparison is ignored. +// If the number of [Transformer] and [Comparer] options in S is non-zero, // then Equal panics because it is ambiguous which option to use. -// If S contains a single Transformer, then use that to transform +// If S contains a single [Transformer], then use that to transform // the current values and recursively call Equal on the output values. -// If S contains a single Comparer, then use that to compare the current values. 
+// If S contains a single [Comparer], then use that to compare the current values. // Otherwise, evaluation proceeds to the next rule. // // - If the values have an Equal method of the form "(T) Equal(T) bool" or @@ -66,21 +67,22 @@ import ( // Functions are only equal if they are both nil, otherwise they are unequal. // // Structs are equal if recursively calling Equal on all fields report equal. -// If a struct contains unexported fields, Equal panics unless an Ignore option -// (e.g., cmpopts.IgnoreUnexported) ignores that field or the Exporter option -// explicitly permits comparing the unexported field. +// If a struct contains unexported fields, Equal panics unless an [Ignore] option +// (e.g., [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported]) ignores that field +// or the [Exporter] option explicitly permits comparing the unexported field. // // Slices are equal if they are both nil or both non-nil, where recursively // calling Equal on all non-ignored slice or array elements report equal. // Empty non-nil slices and nil slices are not equal; to equate empty slices, -// consider using cmpopts.EquateEmpty. +// consider using [github.com/google/go-cmp/cmp/cmpopts.EquateEmpty]. // // Maps are equal if they are both nil or both non-nil, where recursively // calling Equal on all non-ignored map entries report equal. // Map keys are equal according to the == operator. -// To use custom comparisons for map keys, consider using cmpopts.SortMaps. +// To use custom comparisons for map keys, consider using +// [github.com/google/go-cmp/cmp/cmpopts.SortMaps]. // Empty non-nil maps and nil maps are not equal; to equate empty maps, -// consider using cmpopts.EquateEmpty. +// consider using [github.com/google/go-cmp/cmp/cmpopts.EquateEmpty]. // // Pointers and interfaces are equal if they are both nil or both non-nil, // where they have the same underlying concrete type and recursively diff --git a/vendor/github.com/google/go-cmp/cmp/export_unsafe.go b/vendor/github.com/google/go-cmp/cmp/export.go similarity index 94% rename from vendor/github.com/google/go-cmp/cmp/export_unsafe.go rename to vendor/github.com/google/go-cmp/cmp/export.go index e2c0f74e8393e..29f82fe6b2f80 100644 --- a/vendor/github.com/google/go-cmp/cmp/export_unsafe.go +++ b/vendor/github.com/google/go-cmp/cmp/export.go @@ -2,9 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego -// +build !purego - package cmp import ( @@ -12,8 +9,6 @@ import ( "unsafe" ) -const supportExporters = true - // retrieveUnexportedField uses unsafe to forcibly retrieve any field from // a struct such that the value has read-write permissions. // diff --git a/vendor/github.com/google/go-cmp/cmp/export_panic.go b/vendor/github.com/google/go-cmp/cmp/export_panic.go deleted file mode 100644 index ae851fe53f29a..0000000000000 --- a/vendor/github.com/google/go-cmp/cmp/export_panic.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build purego -// +build purego - -package cmp - -import "reflect" - -const supportExporters = false - -func retrieveUnexportedField(reflect.Value, reflect.StructField, bool) reflect.Value { - panic("no support for forcibly accessing unexported fields") -} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer.go similarity index 95% rename from vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go rename to vendor/github.com/google/go-cmp/cmp/internal/value/pointer.go index 16e6860af6e1a..e5dfff69afa15 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer.go @@ -2,9 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego -// +build !purego - package value import ( diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go deleted file mode 100644 index 1a71bfcbd39b1..0000000000000 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2018, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build purego -// +build purego - -package value - -import "reflect" - -// Pointer is an opaque typed pointer and is guaranteed to be comparable. -type Pointer struct { - p uintptr - t reflect.Type -} - -// PointerOf returns a Pointer from v, which must be a -// reflect.Ptr, reflect.Slice, or reflect.Map. -func PointerOf(v reflect.Value) Pointer { - // NOTE: Storing a pointer as an uintptr is technically incorrect as it - // assumes that the GC implementation does not use a moving collector. - return Pointer{v.Pointer(), v.Type()} -} - -// IsNil reports whether the pointer is nil. -func (p Pointer) IsNil() bool { - return p.p == 0 -} - -// Uintptr returns the pointer as a uintptr. -func (p Pointer) Uintptr() uintptr { - return p.p -} diff --git a/vendor/github.com/google/go-cmp/cmp/options.go b/vendor/github.com/google/go-cmp/cmp/options.go index 1f9ca9c4892b5..754496f3b3f84 100644 --- a/vendor/github.com/google/go-cmp/cmp/options.go +++ b/vendor/github.com/google/go-cmp/cmp/options.go @@ -13,15 +13,15 @@ import ( "github.com/google/go-cmp/cmp/internal/function" ) -// Option configures for specific behavior of Equal and Diff. In particular, -// the fundamental Option functions (Ignore, Transformer, and Comparer), +// Option configures for specific behavior of [Equal] and [Diff]. In particular, +// the fundamental Option functions ([Ignore], [Transformer], and [Comparer]), // configure how equality is determined. // -// The fundamental options may be composed with filters (FilterPath and -// FilterValues) to control the scope over which they are applied. +// The fundamental options may be composed with filters ([FilterPath] and +// [FilterValues]) to control the scope over which they are applied. // -// The cmp/cmpopts package provides helper functions for creating options that -// may be used with Equal and Diff. +// The [github.com/google/go-cmp/cmp/cmpopts] package provides helper functions +// for creating options that may be used with [Equal] and [Diff]. type Option interface { // filter applies all filters and returns the option that remains. 
// Each option may only read s.curPath and call s.callTTBFunc. @@ -56,9 +56,9 @@ type core struct{} func (core) isCore() {} -// Options is a list of Option values that also satisfies the Option interface. +// Options is a list of [Option] values that also satisfies the [Option] interface. // Helper comparison packages may return an Options value when packing multiple -// Option values into a single Option. When this package processes an Options, +// [Option] values into a single [Option]. When this package processes an Options, // it will be implicitly expanded into a flat list. // // Applying a filter on an Options is equivalent to applying that same filter @@ -105,16 +105,16 @@ func (opts Options) String() string { return fmt.Sprintf("Options{%s}", strings.Join(ss, ", ")) } -// FilterPath returns a new Option where opt is only evaluated if filter f -// returns true for the current Path in the value tree. +// FilterPath returns a new [Option] where opt is only evaluated if filter f +// returns true for the current [Path] in the value tree. // // This filter is called even if a slice element or map entry is missing and // provides an opportunity to ignore such cases. The filter function must be // symmetric such that the filter result is identical regardless of whether the // missing value is from x or y. // -// The option passed in may be an Ignore, Transformer, Comparer, Options, or -// a previously filtered Option. +// The option passed in may be an [Ignore], [Transformer], [Comparer], [Options], or +// a previously filtered [Option]. func FilterPath(f func(Path) bool, opt Option) Option { if f == nil { panic("invalid path filter function") @@ -142,7 +142,7 @@ func (f pathFilter) String() string { return fmt.Sprintf("FilterPath(%s, %v)", function.NameOf(reflect.ValueOf(f.fnc)), f.opt) } -// FilterValues returns a new Option where opt is only evaluated if filter f, +// FilterValues returns a new [Option] where opt is only evaluated if filter f, // which is a function of the form "func(T, T) bool", returns true for the // current pair of values being compared. If either value is invalid or // the type of the values is not assignable to T, then this filter implicitly @@ -154,8 +154,8 @@ func (f pathFilter) String() string { // If T is an interface, it is possible that f is called with two values with // different concrete types that both implement T. // -// The option passed in may be an Ignore, Transformer, Comparer, Options, or -// a previously filtered Option. +// The option passed in may be an [Ignore], [Transformer], [Comparer], [Options], or +// a previously filtered [Option]. func FilterValues(f interface{}, opt Option) Option { v := reflect.ValueOf(f) if !function.IsType(v.Type(), function.ValueFilter) || v.IsNil() { @@ -192,9 +192,9 @@ func (f valuesFilter) String() string { return fmt.Sprintf("FilterValues(%s, %v)", function.NameOf(f.fnc), f.opt) } -// Ignore is an Option that causes all comparisons to be ignored. -// This value is intended to be combined with FilterPath or FilterValues. -// It is an error to pass an unfiltered Ignore option to Equal. +// Ignore is an [Option] that causes all comparisons to be ignored. +// This value is intended to be combined with [FilterPath] or [FilterValues]. +// It is an error to pass an unfiltered Ignore option to [Equal]. 
func Ignore() Option { return ignore{} } type ignore struct{ core } @@ -234,6 +234,8 @@ func (validator) apply(s *state, vx, vy reflect.Value) { name = fmt.Sprintf("%q.%v", t.PkgPath(), t.Name()) // e.g., "path/to/package".MyType if _, ok := reflect.New(t).Interface().(error); ok { help = "consider using cmpopts.EquateErrors to compare error values" + } else if t.Comparable() { + help = "consider using cmpopts.EquateComparable to compare comparable Go types" } } else { // Unnamed type with unexported fields. Derive PkgPath from field. @@ -254,7 +256,7 @@ const identRx = `[_\p{L}][_\p{L}\p{N}]*` var identsRx = regexp.MustCompile(`^` + identRx + `(\.` + identRx + `)*$`) -// Transformer returns an Option that applies a transformation function that +// Transformer returns an [Option] that applies a transformation function that // converts values of a certain type into that of another. // // The transformer f must be a function "func(T) R" that converts values of @@ -265,13 +267,14 @@ var identsRx = regexp.MustCompile(`^` + identRx + `(\.` + identRx + `)*$`) // same transform to the output of itself (e.g., in the case where the // input and output types are the same), an implicit filter is added such that // a transformer is applicable only if that exact transformer is not already -// in the tail of the Path since the last non-Transform step. +// in the tail of the [Path] since the last non-[Transform] step. // For situations where the implicit filter is still insufficient, -// consider using cmpopts.AcyclicTransformer, which adds a filter -// to prevent the transformer from being recursively applied upon itself. +// consider using [github.com/google/go-cmp/cmp/cmpopts.AcyclicTransformer], +// which adds a filter to prevent the transformer from +// being recursively applied upon itself. // -// The name is a user provided label that is used as the Transform.Name in the -// transformation PathStep (and eventually shown in the Diff output). +// The name is a user provided label that is used as the [Transform.Name] in the +// transformation [PathStep] (and eventually shown in the [Diff] output). // The name must be a valid identifier or qualified identifier in Go syntax. // If empty, an arbitrary name is used. func Transformer(name string, f interface{}) Option { @@ -329,7 +332,7 @@ func (tr transformer) String() string { return fmt.Sprintf("Transformer(%s, %s)", tr.name, function.NameOf(tr.fnc)) } -// Comparer returns an Option that determines whether two values are equal +// Comparer returns an [Option] that determines whether two values are equal // to each other. // // The comparer f must be a function "func(T, T) bool" and is implicitly @@ -377,35 +380,32 @@ func (cm comparer) String() string { return fmt.Sprintf("Comparer(%s)", function.NameOf(cm.fnc)) } -// Exporter returns an Option that specifies whether Equal is allowed to +// Exporter returns an [Option] that specifies whether [Equal] is allowed to // introspect into the unexported fields of certain struct types. // // Users of this option must understand that comparing on unexported fields // from external packages is not safe since changes in the internal -// implementation of some external package may cause the result of Equal +// implementation of some external package may cause the result of [Equal] // to unexpectedly change. However, it may be valid to use this option on types // defined in an internal package where the semantic meaning of an unexported // field is in the control of the user. 
// -// In many cases, a custom Comparer should be used instead that defines +// In many cases, a custom [Comparer] should be used instead that defines // equality as a function of the public API of a type rather than the underlying // unexported implementation. // -// For example, the reflect.Type documentation defines equality to be determined +// For example, the [reflect.Type] documentation defines equality to be determined // by the == operator on the interface (essentially performing a shallow pointer -// comparison) and most attempts to compare *regexp.Regexp types are interested +// comparison) and most attempts to compare *[regexp.Regexp] types are interested // in only checking that the regular expression strings are equal. -// Both of these are accomplished using Comparers: +// Both of these are accomplished using [Comparer] options: // // Comparer(func(x, y reflect.Type) bool { return x == y }) // Comparer(func(x, y *regexp.Regexp) bool { return x.String() == y.String() }) // -// In other cases, the cmpopts.IgnoreUnexported option can be used to ignore -// all unexported fields on specified struct types. +// In other cases, the [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported] +// option can be used to ignore all unexported fields on specified struct types. func Exporter(f func(reflect.Type) bool) Option { - if !supportExporters { - panic("Exporter is not supported on purego builds") - } return exporter(f) } @@ -415,10 +415,10 @@ func (exporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableO panic("not implemented") } -// AllowUnexported returns an Options that allows Equal to forcibly introspect +// AllowUnexported returns an [Option] that allows [Equal] to forcibly introspect // unexported fields of the specified struct types. // -// See Exporter for the proper use of this option. +// See [Exporter] for the proper use of this option. func AllowUnexported(types ...interface{}) Option { m := make(map[reflect.Type]bool) for _, typ := range types { @@ -432,7 +432,7 @@ func AllowUnexported(types ...interface{}) Option { } // Result represents the comparison result for a single node and -// is provided by cmp when calling Report (see Reporter). +// is provided by cmp when calling Report (see [Reporter]). type Result struct { _ [0]func() // Make Result incomparable flags resultFlags @@ -445,7 +445,7 @@ func (r Result) Equal() bool { } // ByIgnore reports whether the node is equal because it was ignored. -// This never reports true if Equal reports false. +// This never reports true if [Result.Equal] reports false. func (r Result) ByIgnore() bool { return r.flags&reportByIgnore != 0 } @@ -455,7 +455,7 @@ func (r Result) ByMethod() bool { return r.flags&reportByMethod != 0 } -// ByFunc reports whether a Comparer function determined equality. +// ByFunc reports whether a [Comparer] function determined equality. func (r Result) ByFunc() bool { return r.flags&reportByFunc != 0 } @@ -478,7 +478,7 @@ const ( reportByCycle ) -// Reporter is an Option that can be passed to Equal. When Equal traverses +// Reporter is an [Option] that can be passed to [Equal]. When [Equal] traverses // the value trees, it calls PushStep as it descends into each node in the // tree and PopStep as it ascend out of the node. 
The leaves of the tree are // either compared (determined to be equal or not equal) or ignored and reported diff --git a/vendor/github.com/google/go-cmp/cmp/path.go b/vendor/github.com/google/go-cmp/cmp/path.go index a0a588502ed67..c3c1456423ce1 100644 --- a/vendor/github.com/google/go-cmp/cmp/path.go +++ b/vendor/github.com/google/go-cmp/cmp/path.go @@ -14,9 +14,9 @@ import ( "github.com/google/go-cmp/cmp/internal/value" ) -// Path is a list of PathSteps describing the sequence of operations to get +// Path is a list of [PathStep] describing the sequence of operations to get // from some root type to the current position in the value tree. -// The first Path element is always an operation-less PathStep that exists +// The first Path element is always an operation-less [PathStep] that exists // simply to identify the initial type. // // When traversing structs with embedded structs, the embedded struct will @@ -29,8 +29,13 @@ type Path []PathStep // a value's tree structure. Users of this package never need to implement // these types as values of this type will be returned by this package. // -// Implementations of this interface are -// StructField, SliceIndex, MapIndex, Indirect, TypeAssertion, and Transform. +// Implementations of this interface: +// - [StructField] +// - [SliceIndex] +// - [MapIndex] +// - [Indirect] +// - [TypeAssertion] +// - [Transform] type PathStep interface { String() string @@ -70,8 +75,9 @@ func (pa *Path) pop() { *pa = (*pa)[:len(*pa)-1] } -// Last returns the last PathStep in the Path. -// If the path is empty, this returns a non-nil PathStep that reports a nil Type. +// Last returns the last [PathStep] in the Path. +// If the path is empty, this returns a non-nil [PathStep] +// that reports a nil [PathStep.Type]. func (pa Path) Last() PathStep { return pa.Index(-1) } @@ -79,7 +85,8 @@ func (pa Path) Last() PathStep { // Index returns the ith step in the Path and supports negative indexing. // A negative index starts counting from the tail of the Path such that -1 // refers to the last step, -2 refers to the second-to-last step, and so on. -// If index is invalid, this returns a non-nil PathStep that reports a nil Type. +// If index is invalid, this returns a non-nil [PathStep] +// that reports a nil [PathStep.Type]. func (pa Path) Index(i int) PathStep { if i < 0 { i = len(pa) + i @@ -168,7 +175,8 @@ func (ps pathStep) String() string { return fmt.Sprintf("{%s}", s) } -// StructField represents a struct field access on a field called Name. +// StructField is a [PathStep] that represents a struct field access +// on a field called [StructField.Name]. type StructField struct{ *structField } type structField struct { pathStep @@ -204,10 +212,11 @@ func (sf StructField) String() string { return fmt.Sprintf(".%s", sf.name) } func (sf StructField) Name() string { return sf.name } // Index is the index of the field in the parent struct type. -// See reflect.Type.Field. +// See [reflect.Type.Field]. func (sf StructField) Index() int { return sf.idx } -// SliceIndex is an index operation on a slice or array at some index Key. +// SliceIndex is a [PathStep] that represents an index operation on +// a slice or array at some index [SliceIndex.Key]. type SliceIndex struct{ *sliceIndex } type sliceIndex struct { pathStep @@ -247,12 +256,12 @@ func (si SliceIndex) Key() int { // all of the indexes to be shifted. If an index is -1, then that // indicates that the element does not exist in the associated slice. 
// -// Key is guaranteed to return -1 if and only if the indexes returned -// by SplitKeys are not the same. SplitKeys will never return -1 for +// [SliceIndex.Key] is guaranteed to return -1 if and only if the indexes +// returned by SplitKeys are not the same. SplitKeys will never return -1 for // both indexes. func (si SliceIndex) SplitKeys() (ix, iy int) { return si.xkey, si.ykey } -// MapIndex is an index operation on a map at some index Key. +// MapIndex is a [PathStep] that represents an index operation on a map at some index Key. type MapIndex struct{ *mapIndex } type mapIndex struct { pathStep @@ -266,7 +275,7 @@ func (mi MapIndex) String() string { return fmt.Sprintf("[%#v]", // Key is the value of the map key. func (mi MapIndex) Key() reflect.Value { return mi.key } -// Indirect represents pointer indirection on the parent type. +// Indirect is a [PathStep] that represents pointer indirection on the parent type. type Indirect struct{ *indirect } type indirect struct { pathStep @@ -276,7 +285,7 @@ func (in Indirect) Type() reflect.Type { return in.typ } func (in Indirect) Values() (vx, vy reflect.Value) { return in.vx, in.vy } func (in Indirect) String() string { return "*" } -// TypeAssertion represents a type assertion on an interface. +// TypeAssertion is a [PathStep] that represents a type assertion on an interface. type TypeAssertion struct{ *typeAssertion } type typeAssertion struct { pathStep @@ -286,7 +295,8 @@ func (ta TypeAssertion) Type() reflect.Type { return ta.typ } func (ta TypeAssertion) Values() (vx, vy reflect.Value) { return ta.vx, ta.vy } func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", value.TypeString(ta.typ, false)) } -// Transform is a transformation from the parent type to the current type. +// Transform is a [PathStep] that represents a transformation +// from the parent type to the current type. type Transform struct{ *transform } type transform struct { pathStep @@ -297,13 +307,13 @@ func (tf Transform) Type() reflect.Type { return tf.typ } func (tf Transform) Values() (vx, vy reflect.Value) { return tf.vx, tf.vy } func (tf Transform) String() string { return fmt.Sprintf("%s()", tf.trans.name) } -// Name is the name of the Transformer. +// Name is the name of the [Transformer]. func (tf Transform) Name() string { return tf.trans.name } // Func is the function pointer to the transformer function. func (tf Transform) Func() reflect.Value { return tf.trans.fnc } -// Option returns the originally constructed Transformer option. +// Option returns the originally constructed [Transformer] option. // The == operator can be used to detect the exact option used. 
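As a hedged usage sketch of the option and path-step APIs above (the measurement type and the tolerance are invented for illustration), a Comparer scoped to one struct field via FilterPath might be wired up like this:

package main

import (
	"fmt"
	"math"

	"github.com/google/go-cmp/cmp"
)

type measurement struct {
	Name  string
	Value float64
}

func main() {
	// The Comparer treats float64 values as equal within a small tolerance;
	// FilterPath restricts it to the Value field, so Name still uses ==.
	approx := cmp.FilterPath(func(p cmp.Path) bool {
		sf, ok := p.Last().(cmp.StructField)
		return ok && sf.Name() == "Value"
	}, cmp.Comparer(func(x, y float64) bool {
		return math.Abs(x-y) < 1e-6
	}))

	a := measurement{Name: "latency", Value: 1.0000001}
	b := measurement{Name: "latency", Value: 1.0000002}
	fmt.Println(cmp.Equal(a, b, approx)) // true under the tolerance comparer
	fmt.Println(cmp.Diff(a, b, approx))  // empty diff when equal
}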
func (tf Transform) Option() Option { return tf.trans } diff --git a/vendor/github.com/google/go-cmp/cmp/report_reflect.go b/vendor/github.com/google/go-cmp/cmp/report_reflect.go index 2ab41fad3fb55..e39f42284ee83 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_reflect.go +++ b/vendor/github.com/google/go-cmp/cmp/report_reflect.go @@ -199,7 +199,7 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, break } sf := t.Field(i) - if supportExporters && !isExported(sf.Name) { + if !isExported(sf.Name) { vv = retrieveUnexportedField(v, sf, true) } s := opts.WithTypeMode(autoType).FormatValue(vv, t.Kind(), ptrs) diff --git a/vendor/github.com/google/s2a-go/README.md b/vendor/github.com/google/s2a-go/README.md index d566950f385f5..fe0f5c1da8131 100644 --- a/vendor/github.com/google/s2a-go/README.md +++ b/vendor/github.com/google/s2a-go/README.md @@ -10,8 +10,5 @@ Session Agent during the TLS handshake, and to encrypt traffic to the peer after the TLS handshake is complete. This repository contains the source code for the Secure Session Agent's Go -client libraries, which allow gRPC-Go applications to use the Secure Session -Agent. This repository supports the Bazel and Golang build systems. - -All code in this repository is experimental and subject to change. We do not -guarantee API stability at this time. +client libraries, which allow gRPC and HTTP Go applications to use the Secure Session +Agent. diff --git a/vendor/github.com/google/s2a-go/internal/handshaker/service/service.go b/vendor/github.com/google/s2a-go/internal/handshaker/service/service.go index 49573af887c06..ed449653702e4 100644 --- a/vendor/github.com/google/s2a-go/internal/handshaker/service/service.go +++ b/vendor/github.com/google/s2a-go/internal/handshaker/service/service.go @@ -21,50 +21,27 @@ package service import ( "context" - "net" - "os" - "strings" "sync" - "time" - "google.golang.org/appengine" - "google.golang.org/appengine/socket" grpc "google.golang.org/grpc" - "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" ) -// An environment variable, if true, opportunistically use AppEngine-specific dialer to call S2A. -const enableAppEngineDialerEnv = "S2A_ENABLE_APP_ENGINE_DIALER" - var ( - // appEngineDialerHook is an AppEngine-specific dial option that is set - // during init time. If nil, then the application is not running on Google - // AppEngine. - appEngineDialerHook func(context.Context) grpc.DialOption // mu guards hsConnMap and hsDialer. mu sync.Mutex // hsConnMap represents a mapping from an S2A handshaker service address // to a corresponding connection to an S2A handshaker service instance. hsConnMap = make(map[string]*grpc.ClientConn) // hsDialer will be reassigned in tests. - hsDialer = grpc.Dial + hsDialer = grpc.DialContext ) -func init() { - if !appengine.IsAppEngine() && !appengine.IsDevAppServer() { - return - } - appEngineDialerHook = func(ctx context.Context) grpc.DialOption { - return grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) { - return socket.DialTimeout(ctx, "tcp", addr, timeout) - }) - } -} - // Dial dials the S2A handshaker service. If a connection has already been // established, this function returns it. Otherwise, a new connection is // created. 
-func Dial(handshakerServiceAddress string) (*grpc.ClientConn, error) { +func Dial(ctx context.Context, handshakerServiceAddress string, transportCreds credentials.TransportCredentials) (*grpc.ClientConn, error) { mu.Lock() defer mu.Unlock() @@ -72,17 +49,14 @@ func Dial(handshakerServiceAddress string) (*grpc.ClientConn, error) { if !ok { // Create a new connection to the S2A handshaker service. Note that // this connection stays open until the application is closed. - grpcOpts := []grpc.DialOption{ - grpc.WithInsecure(), - } - if enableAppEngineDialer() && appEngineDialerHook != nil { - if grpclog.V(1) { - grpclog.Info("Using AppEngine-specific dialer to talk to S2A.") - } - grpcOpts = append(grpcOpts, appEngineDialerHook(context.Background())) + var grpcOpts []grpc.DialOption + if transportCreds != nil { + grpcOpts = append(grpcOpts, grpc.WithTransportCredentials(transportCreds)) + } else { + grpcOpts = append(grpcOpts, grpc.WithTransportCredentials(insecure.NewCredentials())) } var err error - hsConn, err = hsDialer(handshakerServiceAddress, grpcOpts...) + hsConn, err = hsDialer(ctx, handshakerServiceAddress, grpcOpts...) if err != nil { return nil, err } @@ -90,10 +64,3 @@ func Dial(handshakerServiceAddress string) (*grpc.ClientConn, error) { } return hsConn, nil } - -func enableAppEngineDialer() bool { - if strings.ToLower(os.Getenv(enableAppEngineDialerEnv)) == "true" { - return true - } - return false -} diff --git a/vendor/github.com/google/s2a-go/internal/record/ticketsender.go b/vendor/github.com/google/s2a-go/internal/record/ticketsender.go index 33fa3c55d47b4..e51199ab3ac27 100644 --- a/vendor/github.com/google/s2a-go/internal/record/ticketsender.go +++ b/vendor/github.com/google/s2a-go/internal/record/ticketsender.go @@ -83,13 +83,15 @@ func (t *ticketSender) sendTicketsToS2A(sessionTickets [][]byte, callComplete ch t.ensureProcessSessionTickets.Done() } }() - hsConn, err := service.Dial(t.hsAddr) + ctx, cancel := context.WithTimeout(context.Background(), sessionTimeout) + defer cancel() + // The transportCreds only needs to be set when talking to S2AV2 and also + // if mTLS is required. + hsConn, err := service.Dial(ctx, t.hsAddr, nil) if err != nil { return err } client := s2apb.NewS2AServiceClient(hsConn) - ctx, cancel := context.WithTimeout(context.Background(), sessionTimeout) - defer cancel() session, err := client.SetUpSession(ctx) if err != nil { return err diff --git a/vendor/github.com/google/s2a-go/internal/v2/s2av2.go b/vendor/github.com/google/s2a-go/internal/v2/s2av2.go index ff172883f246d..85a8379d83325 100644 --- a/vendor/github.com/google/s2a-go/internal/v2/s2av2.go +++ b/vendor/github.com/google/s2a-go/internal/v2/s2av2.go @@ -33,6 +33,7 @@ import ( "github.com/google/s2a-go/internal/handshaker/service" "github.com/google/s2a-go/internal/tokenmanager" "github.com/google/s2a-go/internal/v2/tlsconfigstore" + "github.com/google/s2a-go/retry" "github.com/google/s2a-go/stream" "google.golang.org/grpc" "google.golang.org/grpc/credentials" @@ -44,18 +45,19 @@ import ( const ( s2aSecurityProtocol = "tls" - defaultS2ATimeout = 3 * time.Second + defaultS2ATimeout = 6 * time.Second ) // An environment variable, which sets the timeout enforced on the connection to the S2A service for handshake. 
const s2aTimeoutEnv = "S2A_TIMEOUT" type s2av2TransportCreds struct { - info *credentials.ProtocolInfo - isClient bool - serverName string - s2av2Address string - tokenManager *tokenmanager.AccessTokenManager + info *credentials.ProtocolInfo + isClient bool + serverName string + s2av2Address string + transportCreds credentials.TransportCredentials + tokenManager *tokenmanager.AccessTokenManager // localIdentity should only be used by the client. localIdentity *commonpbv1.Identity // localIdentities should only be used by the server. @@ -68,7 +70,7 @@ type s2av2TransportCreds struct { // NewClientCreds returns a client-side transport credentials object that uses // the S2Av2 to establish a secure connection with a server. -func NewClientCreds(s2av2Address string, localIdentity *commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, fallbackClientHandshakeFunc fallback.ClientHandshake, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error), serverAuthorizationPolicy []byte) (credentials.TransportCredentials, error) { +func NewClientCreds(s2av2Address string, transportCreds credentials.TransportCredentials, localIdentity *commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, fallbackClientHandshakeFunc fallback.ClientHandshake, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error), serverAuthorizationPolicy []byte) (credentials.TransportCredentials, error) { // Create an AccessTokenManager instance to use to authenticate to S2Av2. accessTokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() @@ -79,6 +81,7 @@ func NewClientCreds(s2av2Address string, localIdentity *commonpbv1.Identity, ver isClient: true, serverName: "", s2av2Address: s2av2Address, + transportCreds: transportCreds, localIdentity: localIdentity, verificationMode: verificationMode, fallbackClientHandshake: fallbackClientHandshakeFunc, @@ -98,7 +101,7 @@ func NewClientCreds(s2av2Address string, localIdentity *commonpbv1.Identity, ver // NewServerCreds returns a server-side transport credentials object that uses // the S2Av2 to establish a secure connection with a client. -func NewServerCreds(s2av2Address string, localIdentities []*commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error)) (credentials.TransportCredentials, error) { +func NewServerCreds(s2av2Address string, transportCreds credentials.TransportCredentials, localIdentities []*commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error)) (credentials.TransportCredentials, error) { // Create an AccessTokenManager instance to use to authenticate to S2Av2. 
accessTokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() creds := &s2av2TransportCreds{ @@ -107,6 +110,7 @@ func NewServerCreds(s2av2Address string, localIdentities []*commonpbv1.Identity, }, isClient: false, s2av2Address: s2av2Address, + transportCreds: transportCreds, localIdentities: localIdentities, verificationMode: verificationMode, getS2AStream: getS2AStream, @@ -131,7 +135,13 @@ func (c *s2av2TransportCreds) ClientHandshake(ctx context.Context, serverAuthori serverName := removeServerNamePort(serverAuthority) timeoutCtx, cancel := context.WithTimeout(ctx, GetS2ATimeout()) defer cancel() - s2AStream, err := createStream(timeoutCtx, c.s2av2Address, c.getS2AStream) + var s2AStream stream.S2AStream + var err error + retry.Run(timeoutCtx, + func() error { + s2AStream, err = createStream(timeoutCtx, c.s2av2Address, c.transportCreds, c.getS2AStream) + return err + }) if err != nil { grpclog.Infof("Failed to connect to S2Av2: %v", err) if c.fallbackClientHandshake != nil { @@ -152,31 +162,34 @@ func (c *s2av2TransportCreds) ClientHandshake(ctx context.Context, serverAuthori tokenManager = *c.tokenManager } - if c.serverName == "" { - config, err = tlsconfigstore.GetTLSConfigurationForClient(serverName, s2AStream, tokenManager, c.localIdentity, c.verificationMode, c.serverAuthorizationPolicy) - if err != nil { - grpclog.Info("Failed to get client TLS config from S2Av2: %v", err) - if c.fallbackClientHandshake != nil { - return c.fallbackClientHandshake(ctx, serverAuthority, rawConn, err) - } - return nil, nil, err - } - } else { - config, err = tlsconfigstore.GetTLSConfigurationForClient(c.serverName, s2AStream, tokenManager, c.localIdentity, c.verificationMode, c.serverAuthorizationPolicy) - if err != nil { - grpclog.Info("Failed to get client TLS config from S2Av2: %v", err) - if c.fallbackClientHandshake != nil { - return c.fallbackClientHandshake(ctx, serverAuthority, rawConn, err) - } - return nil, nil, err + sn := serverName + if c.serverName != "" { + sn = c.serverName + } + retry.Run(timeoutCtx, + func() error { + config, err = tlsconfigstore.GetTLSConfigurationForClient(sn, s2AStream, tokenManager, c.localIdentity, c.verificationMode, c.serverAuthorizationPolicy) + return err + }) + if err != nil { + grpclog.Info("Failed to get client TLS config from S2Av2: %v", err) + if c.fallbackClientHandshake != nil { + return c.fallbackClientHandshake(ctx, serverAuthority, rawConn, err) } + return nil, nil, err } if grpclog.V(1) { grpclog.Infof("Got client TLS config from S2Av2.") } - creds := credentials.NewTLS(config) - conn, authInfo, err := creds.ClientHandshake(ctx, serverName, rawConn) + creds := credentials.NewTLS(config) + var conn net.Conn + var authInfo credentials.AuthInfo + retry.Run(timeoutCtx, + func() error { + conn, authInfo, err = creds.ClientHandshake(timeoutCtx, serverName, rawConn) + return err + }) if err != nil { grpclog.Infof("Failed to do client handshake using S2Av2: %v", err) if c.fallbackClientHandshake != nil { @@ -196,7 +209,13 @@ func (c *s2av2TransportCreds) ServerHandshake(rawConn net.Conn) (net.Conn, crede } ctx, cancel := context.WithTimeout(context.Background(), GetS2ATimeout()) defer cancel() - s2AStream, err := createStream(ctx, c.s2av2Address, c.getS2AStream) + var s2AStream stream.S2AStream + var err error + retry.Run(ctx, + func() error { + s2AStream, err = createStream(ctx, c.s2av2Address, c.transportCreds, c.getS2AStream) + return err + }) if err != nil { grpclog.Infof("Failed to connect to S2Av2: %v", err) return nil, nil, err @@ 
-213,7 +232,12 @@ func (c *s2av2TransportCreds) ServerHandshake(rawConn net.Conn) (net.Conn, crede tokenManager = *c.tokenManager } - config, err := tlsconfigstore.GetTLSConfigurationForServer(s2AStream, tokenManager, c.localIdentities, c.verificationMode) + var config *tls.Config + retry.Run(ctx, + func() error { + config, err = tlsconfigstore.GetTLSConfigurationForServer(s2AStream, tokenManager, c.localIdentities, c.verificationMode) + return err + }) if err != nil { grpclog.Infof("Failed to get server TLS config from S2Av2: %v", err) return nil, nil, err @@ -221,8 +245,20 @@ func (c *s2av2TransportCreds) ServerHandshake(rawConn net.Conn) (net.Conn, crede if grpclog.V(1) { grpclog.Infof("Got server TLS config from S2Av2.") } + creds := credentials.NewTLS(config) - return creds.ServerHandshake(rawConn) + var conn net.Conn + var authInfo credentials.AuthInfo + retry.Run(ctx, + func() error { + conn, authInfo, err = creds.ServerHandshake(rawConn) + return err + }) + if err != nil { + grpclog.Infof("Failed to do server handshake using S2Av2: %v", err) + return nil, nil, err + } + return conn, authInfo, err } // Info returns protocol info of s2av2TransportCreds. @@ -278,11 +314,12 @@ func (c *s2av2TransportCreds) Clone() credentials.TransportCredentials { func NewClientTLSConfig( ctx context.Context, s2av2Address string, + transportCreds credentials.TransportCredentials, tokenManager tokenmanager.AccessTokenManager, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, serverName string, serverAuthorizationPolicy []byte) (*tls.Config, error) { - s2AStream, err := createStream(ctx, s2av2Address, nil) + s2AStream, err := createStream(ctx, s2av2Address, transportCreds, nil) if err != nil { grpclog.Infof("Failed to connect to S2Av2: %v", err) return nil, err @@ -325,12 +362,12 @@ func (x s2AGrpcStream) CloseSend() error { return x.stream.CloseSend() } -func createStream(ctx context.Context, s2av2Address string, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error)) (stream.S2AStream, error) { +func createStream(ctx context.Context, s2av2Address string, transportCreds credentials.TransportCredentials, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error)) (stream.S2AStream, error) { if getS2AStream != nil { return getS2AStream(ctx, s2av2Address) } // TODO(rmehta19): Consider whether to close the connection to S2Av2. - conn, err := service.Dial(s2av2Address) + conn, err := service.Dial(ctx, s2av2Address, transportCreds) if err != nil { return nil, err } diff --git a/vendor/github.com/google/s2a-go/retry/retry.go b/vendor/github.com/google/s2a-go/retry/retry.go new file mode 100644 index 0000000000000..f7e0a23779f41 --- /dev/null +++ b/vendor/github.com/google/s2a-go/retry/retry.go @@ -0,0 +1,144 @@ +/* + * + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package retry provides a retry helper for talking to S2A gRPC server. 
+// The implementation is modeled after +// https://github.com/googleapis/google-cloud-go/blob/main/compute/metadata/retry.go +package retry + +import ( + "context" + "math/rand" + "time" + + "google.golang.org/grpc/grpclog" +) + +const ( + maxRetryAttempts = 5 + maxRetryForLoops = 10 +) + +type defaultBackoff struct { + max time.Duration + mul float64 + cur time.Duration +} + +// Pause returns a duration, which is used as the backoff wait time +// before the next retry. +func (b *defaultBackoff) Pause() time.Duration { + d := time.Duration(1 + rand.Int63n(int64(b.cur))) + b.cur = time.Duration(float64(b.cur) * b.mul) + if b.cur > b.max { + b.cur = b.max + } + return d +} + +// Sleep will wait for the specified duration or return on context +// expiration. +func Sleep(ctx context.Context, d time.Duration) error { + t := time.NewTimer(d) + select { + case <-ctx.Done(): + t.Stop() + return ctx.Err() + case <-t.C: + return nil + } +} + +// NewRetryer creates an instance of S2ARetryer using the defaultBackoff +// implementation. +var NewRetryer = func() *S2ARetryer { + return &S2ARetryer{bo: &defaultBackoff{ + cur: 100 * time.Millisecond, + max: 30 * time.Second, + mul: 2, + }} +} + +type backoff interface { + Pause() time.Duration +} + +// S2ARetryer implements a retry helper for talking to S2A gRPC server. +type S2ARetryer struct { + bo backoff + attempts int +} + +// Attempts return the number of retries attempted. +func (r *S2ARetryer) Attempts() int { + return r.attempts +} + +// Retry returns a boolean indicating whether retry should be performed +// and the backoff duration. +func (r *S2ARetryer) Retry(err error) (time.Duration, bool) { + if err == nil { + return 0, false + } + if r.attempts >= maxRetryAttempts { + return 0, false + } + r.attempts++ + return r.bo.Pause(), true +} + +// Run uses S2ARetryer to execute the function passed in, until success or reaching +// max number of retry attempts. +func Run(ctx context.Context, f func() error) { + retryer := NewRetryer() + forLoopCnt := 0 + var err error + for { + err = f() + if bo, shouldRetry := retryer.Retry(err); shouldRetry { + if grpclog.V(1) { + grpclog.Infof("will attempt retry: %v", err) + } + if ctx.Err() != nil { + if grpclog.V(1) { + grpclog.Infof("exit retry loop due to context error: %v", ctx.Err()) + } + break + } + if errSleep := Sleep(ctx, bo); errSleep != nil { + if grpclog.V(1) { + grpclog.Infof("exit retry loop due to sleep error: %v", errSleep) + } + break + } + // This shouldn't happen, just make sure we are not stuck in the for loops. 
+ forLoopCnt++ + if forLoopCnt > maxRetryForLoops { + if grpclog.V(1) { + grpclog.Infof("exit the for loop after too many retries") + } + break + } + continue + } + if grpclog.V(1) { + grpclog.Infof("retry conditions not met, exit the loop") + } + break + } +} diff --git a/vendor/github.com/google/s2a-go/s2a.go b/vendor/github.com/google/s2a-go/s2a.go index 1c1349de4afdc..5ecb06f930eb0 100644 --- a/vendor/github.com/google/s2a-go/s2a.go +++ b/vendor/github.com/google/s2a-go/s2a.go @@ -35,6 +35,7 @@ import ( "github.com/google/s2a-go/internal/handshaker/service" "github.com/google/s2a-go/internal/tokenmanager" "github.com/google/s2a-go/internal/v2" + "github.com/google/s2a-go/retry" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" @@ -111,7 +112,7 @@ func NewClientCreds(opts *ClientOptions) (credentials.TransportCredentials, erro if opts.FallbackOpts != nil && opts.FallbackOpts.FallbackClientHandshakeFunc != nil { fallbackFunc = opts.FallbackOpts.FallbackClientHandshakeFunc } - return v2.NewClientCreds(opts.S2AAddress, localIdentity, verificationMode, fallbackFunc, opts.getS2AStream, opts.serverAuthorizationPolicy) + return v2.NewClientCreds(opts.S2AAddress, opts.TransportCreds, localIdentity, verificationMode, fallbackFunc, opts.getS2AStream, opts.serverAuthorizationPolicy) } // NewServerCreds returns a server-side transport credentials object that uses @@ -146,7 +147,7 @@ func NewServerCreds(opts *ServerOptions) (credentials.TransportCredentials, erro }, nil } verificationMode := getVerificationMode(opts.VerificationMode) - return v2.NewServerCreds(opts.S2AAddress, localIdentities, verificationMode, opts.getS2AStream) + return v2.NewServerCreds(opts.S2AAddress, opts.TransportCreds, localIdentities, verificationMode, opts.getS2AStream) } // ClientHandshake initiates a client-side TLS handshake using the S2A. @@ -155,17 +156,17 @@ func (c *s2aTransportCreds) ClientHandshake(ctx context.Context, serverAuthority return nil, nil, errors.New("client handshake called using server transport credentials") } + var cancel context.CancelFunc + ctx, cancel = context.WithCancel(ctx) + defer cancel() + // Connect to the S2A. - hsConn, err := service.Dial(c.s2aAddr) + hsConn, err := service.Dial(ctx, c.s2aAddr, nil) if err != nil { grpclog.Infof("Failed to connect to S2A: %v", err) return nil, nil, err } - var cancel context.CancelFunc - ctx, cancel = context.WithCancel(ctx) - defer cancel() - opts := &handshaker.ClientHandshakerOptions{ MinTLSVersion: c.minTLSVersion, MaxTLSVersion: c.maxTLSVersion, @@ -203,16 +204,16 @@ func (c *s2aTransportCreds) ServerHandshake(rawConn net.Conn) (net.Conn, credent return nil, nil, errors.New("server handshake called using client transport credentials") } + ctx, cancel := context.WithTimeout(context.Background(), defaultTimeout) + defer cancel() + // Connect to the S2A. 
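For orientation, here is a minimal, hedged sketch of how the new retry.Run helper is meant to be used by callers such as the s2a.go hunks above. flakyFetch is a hypothetical stand-in for a call to the S2A server, and the timeout value is illustrative.

package main

import (
	"context"
	"errors"
	"log"
	"time"

	"github.com/google/s2a-go/retry"
)

// flakyFetch stands in for a call to the S2A server that fails twice before
// succeeding. It is a hypothetical helper, not part of the library.
func flakyFetch(attempt int) (string, error) {
	if attempt < 3 {
		return "", errors.New("transient failure")
	}
	return "ok", nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	var (
		attempts int
		result   string
		err      error
	)
	// retry.Run re-invokes the closure until it returns nil, the retryer
	// exhausts its 5 attempts (backing off from ~100ms, doubling, capped at
	// 30s), or ctx expires. The last error is left in err for the caller to
	// inspect, which is how the s2a call sites in this patch use it.
	retry.Run(ctx, func() error {
		attempts++
		result, err = flakyFetch(attempts)
		return err
	})
	if err != nil {
		log.Fatalf("still failing after %d attempts: %v", attempts, err)
	}
	log.Printf("got %q after %d attempts", result, attempts)
}

The same pattern wraps the handshakes, the TLS-config fetch, and the dial in this patch, while the new service.Dial(ctx, addr, transportCreds) signature lets the caller decide, via the TransportCreds option added to ClientOptions and ServerOptions further down, how the connection to the S2A server itself is secured.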
- hsConn, err := service.Dial(c.s2aAddr) + hsConn, err := service.Dial(ctx, c.s2aAddr, nil) if err != nil { grpclog.Infof("Failed to connect to S2A: %v", err) return nil, nil, err } - ctx, cancel := context.WithTimeout(context.Background(), defaultTimeout) - defer cancel() - opts := &handshaker.ServerHandshakerOptions{ MinTLSVersion: c.minTLSVersion, MaxTLSVersion: c.maxTLSVersion, @@ -312,6 +313,7 @@ func NewTLSClientConfigFactory(opts *ClientOptions) (TLSClientConfigFactory, err grpclog.Infof("Access token manager not initialized: %v", err) return &s2aTLSClientConfigFactory{ s2av2Address: opts.S2AAddress, + transportCreds: opts.TransportCreds, tokenManager: nil, verificationMode: getVerificationMode(opts.VerificationMode), serverAuthorizationPolicy: opts.serverAuthorizationPolicy, @@ -319,6 +321,7 @@ func NewTLSClientConfigFactory(opts *ClientOptions) (TLSClientConfigFactory, err } return &s2aTLSClientConfigFactory{ s2av2Address: opts.S2AAddress, + transportCreds: opts.TransportCreds, tokenManager: tokenManager, verificationMode: getVerificationMode(opts.VerificationMode), serverAuthorizationPolicy: opts.serverAuthorizationPolicy, @@ -327,6 +330,7 @@ func NewTLSClientConfigFactory(opts *ClientOptions) (TLSClientConfigFactory, err type s2aTLSClientConfigFactory struct { s2av2Address string + transportCreds credentials.TransportCredentials tokenManager tokenmanager.AccessTokenManager verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode serverAuthorizationPolicy []byte @@ -338,7 +342,7 @@ func (f *s2aTLSClientConfigFactory) Build( if opts != nil && opts.ServerName != "" { serverName = opts.ServerName } - return v2.NewClientTLSConfig(ctx, f.s2av2Address, f.tokenManager, f.verificationMode, serverName, f.serverAuthorizationPolicy) + return v2.NewClientTLSConfig(ctx, f.s2av2Address, f.transportCreds, f.tokenManager, f.verificationMode, serverName, f.serverAuthorizationPolicy) } func getVerificationMode(verificationMode VerificationModeType) s2av2pb.ValidatePeerCertificateChainReq_VerificationMode { @@ -390,9 +394,15 @@ func NewS2ADialTLSContextFunc(opts *ClientOptions) func(ctx context.Context, net } timeoutCtx, cancel := context.WithTimeout(ctx, v2.GetS2ATimeout()) defer cancel() - s2aTLSConfig, err := factory.Build(timeoutCtx, &TLSClientConfigOptions{ - ServerName: serverName, - }) + + var s2aTLSConfig *tls.Config + retry.Run(timeoutCtx, + func() error { + s2aTLSConfig, err = factory.Build(timeoutCtx, &TLSClientConfigOptions{ + ServerName: serverName, + }) + return err + }) if err != nil { grpclog.Infof("error building S2A TLS config: %v", err) return fallback(err) @@ -401,7 +411,12 @@ func NewS2ADialTLSContextFunc(opts *ClientOptions) func(ctx context.Context, net s2aDialer := &tls.Dialer{ Config: s2aTLSConfig, } - c, err := s2aDialer.DialContext(ctx, network, addr) + var c net.Conn + retry.Run(timeoutCtx, + func() error { + c, err = s2aDialer.DialContext(timeoutCtx, network, addr) + return err + }) if err != nil { grpclog.Infof("error dialing with S2A to %s: %v", addr, err) return fallback(err) diff --git a/vendor/github.com/google/s2a-go/s2a_options.go b/vendor/github.com/google/s2a-go/s2a_options.go index 94feafb9cf8d9..fcdbc1621bd18 100644 --- a/vendor/github.com/google/s2a-go/s2a_options.go +++ b/vendor/github.com/google/s2a-go/s2a_options.go @@ -26,6 +26,7 @@ import ( "github.com/google/s2a-go/fallback" "github.com/google/s2a-go/stream" + "google.golang.org/grpc/credentials" s2apb "github.com/google/s2a-go/internal/proto/common_go_proto" ) @@ -92,6 +93,9 @@ type 
ClientOptions struct { LocalIdentity Identity // S2AAddress is the address of the S2A. S2AAddress string + // Optional transport credentials. + // If set, this will be used for the gRPC connection to the S2A server. + TransportCreds credentials.TransportCredentials // EnsureProcessSessionTickets waits for all session tickets to be sent to // S2A before a process completes. // @@ -173,6 +177,9 @@ type ServerOptions struct { LocalIdentities []Identity // S2AAddress is the address of the S2A. S2AAddress string + // Optional transport credentials. + // If set, this will be used for the gRPC connection to the S2A server. + TransportCreds credentials.TransportCredentials // If true, enables the use of legacy S2Av1. EnableLegacyMode bool // VerificationMode specifies the mode that S2A must use to verify the diff --git a/vendor/github.com/google/s2a-go/testdata/mds_client_cert.pem b/vendor/github.com/google/s2a-go/testdata/mds_client_cert.pem new file mode 100644 index 0000000000000..60c4cf0691574 --- /dev/null +++ b/vendor/github.com/google/s2a-go/testdata/mds_client_cert.pem @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDCDCCAfACFFlYsYCFit01ZpYmfjxpo7/6wMEbMA0GCSqGSIb3DQEBCwUAMEgx +CzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTEPMA0GA1UECgwGR29vZ2xlMRswGQYD +VQQDDBJ0ZXN0LXMyYS1tdGxzLXJvb3QwHhcNMjMwODIyMTY0NTE4WhcNNDMwODIy +MTY0NTE4WjA5MQswCQYDVQQGEwJVUzELMAkGA1UECAwCQ0ExHTAbBgNVBAMMFHRl +c3QtczJhLW10bHMtY2xpZW50MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEAqrQQMyxNtmdCB+uY3szgRsfPrKC+TV9Fusnd8PfaCVuGTGcSBKM018nV2TDn +3IYFQ1HgLpGwGwOFDBb3y0o9i2/l2VJySriX1GSNX6nDmVasQlO1wuOLCP7/LRmO +7b6Kise5W0IFhYaptKyWnekn2pS0tAjimqpfn2w0U6FDGtQUqg/trQQmGtTSJHjb +A+OFd0EFC18KGP8Q+jOMaMkJRmpeEiAPyHPDoMhqQNT26RApv9j2Uzo4SuXzHH6T +cAdm1+zG+EXY/UZKX9oDkSbwIJvN+gCmNyORLalJ12gsGYOCjMd8K0mlXBqrmmbO +VHVbUm9062lhE7x59AA8DK4DoQIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQCPOvtL +dq2hxFHlIy0YUK8jp/DtwJZPwzx1id5FtWwd0CxBS1StIgmkHMxtkJGz1iyQLplI +je+Msd4sTsb5zZi/8kGKehi8Wj4lghp4oP30cpob41OvM68M9RC/wSOVk9igSww+ +l3zof6wKRIswsi5VHrL16ruIVVoDlyFbKr8yk+cp9OPOV8hNNN7ewY9xC8OgnTt8 +YtdaLe6uTplKBLW+j3GtshigRhyfkGJyPFYL4LAeDJCHlC1qmBnkyP0ijMp6vneM +E8TLavnMTMcpihWTWpyKeRkO6HDRsP4AofQAp7VAiAdSOplga+w2qgrVICV+m8MK +BTq2PBvc59T6OFLq +-----END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/testdata/mds_client_key.pem b/vendor/github.com/google/s2a-go/testdata/mds_client_key.pem new file mode 100644 index 0000000000000..9d112d1e9ff9a --- /dev/null +++ b/vendor/github.com/google/s2a-go/testdata/mds_client_key.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCqtBAzLE22Z0IH +65jezOBGx8+soL5NX0W6yd3w99oJW4ZMZxIEozTXydXZMOfchgVDUeAukbAbA4UM +FvfLSj2Lb+XZUnJKuJfUZI1fqcOZVqxCU7XC44sI/v8tGY7tvoqKx7lbQgWFhqm0 +rJad6SfalLS0COKaql+fbDRToUMa1BSqD+2tBCYa1NIkeNsD44V3QQULXwoY/xD6 +M4xoyQlGal4SIA/Ic8OgyGpA1PbpECm/2PZTOjhK5fMcfpNwB2bX7Mb4Rdj9Rkpf +2gORJvAgm836AKY3I5EtqUnXaCwZg4KMx3wrSaVcGquaZs5UdVtSb3TraWETvHn0 +ADwMrgOhAgMBAAECggEAUccupZ1ZY4OHTi0PkNk8rpwFwTFGyeFVEf2ofkr24RnA +NnUAXEllxOUUNlcoFOz9s3kTeavg3qgqgpa0QmdAIb9LMXg+ec6CKkW7trMpGho8 +LxBUWNfSoU4sKEqAvyPT0lWJVo9D/up6/avbAi6TIbOw+Djzel4ZrlHTpabxc3WT +EilXzn4q54b3MzxCQeQjcnzTieW4Q5semG2kLiXFToHIY2di01P/O8awUjgrD+uW +/Cb6H49MnHm9VPkqea1iwZeMQd6Gh5FrC7RezsBjdB1JBcfsv6PFt2ySInjB8SF+ +XR5Gr3Cc5sh9s0LfprZ9Dq0rlSWmwasPMI1COK6SswKBgQDczgeWd3erQ1JX9LEI +wollawqC9y7uJhEsw1hrPqA3uqZYiLUc7Nmi4laZ12mcGoXNDS3R3XmD58qGmGaU +lxEVTb8KDVWBgw450VoBKzSMQnCP6zn4nZxTYxeqMKjDGf6TRB6TZc843qsG3eRC +k91yxrCQ/0HV6PT48C+lieDzLwKBgQDF6aNKiyrswr457undBnM1H8q/Y6xC5ZlK 
+UtiQdhuyBnicvz0U8WPxBY/8gha0OXWuSnBqq/z77iFVNv/zT6p9K7kM7nBGd8cB +8KO6FNbyaHWFrhCI5zNzRTH4oha0hfvUOoti09vqavCtWD4L+D/63ba1wNLKPO9o +4gWbCnUCLwKBgQC/vus372csgrnvR761LLrEJ8BpGt7WUJh5luoht7DKtHvgRleB +Vu1oVcV+s2Iy/ZVUDC3OIdZ0hcWKPK5YOxfKuEk+IXYvke+4peTTPwHTC59UW6Fs +FPK8N0FFuhvT0a8RlAY5WiAp8rPysp6WcnHMSl7qi8BQUozp4Sp/RsziYQKBgBXv +r4mzoy5a53rEYGd/L4XT4EUWZyGDEVqLlDVu4eL5lKTLDZokp08vrqXuRVX0iHap +CYzJQ2EpI8iuL/BoBB2bmwcz5n3pCMXORld5t9lmeqA2it6hwbIlGUTVsm6P6zm6 +w3hQwy9YaxTLkxUAjxbfPEEo/jQsTNzzMGve3NlBAoGAbgJExpDyMDnaD2Vi5eyr +63b54BsqeLHqxJmADifyRCj7G1SJMm3zMKkNNOS0vsXgoiId973STFf1XQiojiv8 +Slbxyv5rczcY0n3LOuQYcM5OzsjzpNFZsT2dDnMfNRUF3rx3Geu/FuJ9scF1b00r +fVMrcL3jSf/W1Xh4TgtyoU8= +-----END PRIVATE KEY----- diff --git a/vendor/github.com/google/s2a-go/testdata/mds_root_cert.pem b/vendor/github.com/google/s2a-go/testdata/mds_root_cert.pem new file mode 100644 index 0000000000000..44e436f6ec7cb --- /dev/null +++ b/vendor/github.com/google/s2a-go/testdata/mds_root_cert.pem @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDcTCCAlmgAwIBAgIUDUkgI+2FZtuUHyUUi0ZBH7JvN00wDQYJKoZIhvcNAQEL +BQAwSDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQ8wDQYDVQQKDAZHb29nbGUx +GzAZBgNVBAMMEnRlc3QtczJhLW10bHMtcm9vdDAeFw0yMzA4MjEyMTI5MTVaFw00 +MzA4MjEyMTI5MTVaMEgxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTEPMA0GA1UE +CgwGR29vZ2xlMRswGQYDVQQDDBJ0ZXN0LXMyYS1tdGxzLXJvb3QwggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCbFEQfpvla27bATedrN4BAWsI9GSwSnJLW +QWzXcnAk6cKxQBAhnaKHRxHY8ttLhNTtxQeub894CLzJvHE/0xDhuMzjtCCCZ7i2 +r08tKZ1KcEzPJCPNlxlzAXPA45XU3LRlbGvju/PBPhm6n1hCEKTNI/KETJ5DEaYg +Cf2LcXVsl/zW20MwDZ+e2w/9a2a6n6DdpW1ekOR550hXAUOIxvmXRBeYeGLFvp1n +rQgZBhRaxP03UB+PQD2oMi/4mfsS96uGCXdzzX8qV46O8m132HUbnA/wagIwboEe +d7Bx237dERDyHw5GFnll7orgA0FOtoEufXdeQxWVvTjO0+PVPgsvAgMBAAGjUzBR +MB0GA1UdDgQWBBRyMtg/yutV8hw8vOq0i8x0eBQi7DAfBgNVHSMEGDAWgBRyMtg/ +yutV8hw8vOq0i8x0eBQi7DAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUA +A4IBAQArN/gdqWMxd5Rvq2eJMTp6I4RepJOT7Go4sMsRsy1caJqqcoS2EvREDZMN +XNEBcyQBB5kYd6TCcZGoLnEtWYXQ4jjEiXG1g7/+rWxyqw0ZYuP7FWzuHg3Uor/x +fApbEKwptP5ywVc+33h4qreGcqXkVCCn+sAcstGgrqubdGZW2T5gazUMyammOOuN +9IWL1PbvXmgEKD+80NUIrk09zanYyrElGdU/zw/kUbZ3Jf6WUBtJGhTzRQ1qZeKa +VnpCbLoG3vObEB8mxDUAlIzwAtfvw4U32BVIZA8xrocz6OOoAnSW1bTlo3EOIo/G +MTV7jmY9TBPtfhRuO/cG650+F+cw +-----END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/testdata/mds_server_cert.pem b/vendor/github.com/google/s2a-go/testdata/mds_server_cert.pem new file mode 100644 index 0000000000000..68c60613458ab --- /dev/null +++ b/vendor/github.com/google/s2a-go/testdata/mds_server_cert.pem @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDbjCCAlagAwIBAgIUbexZ5sZl86Al9dsI2PkOgtqKnkgwDQYJKoZIhvcNAQEL +BQAwSDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQ8wDQYDVQQKDAZHb29nbGUx +GzAZBgNVBAMMEnRlc3QtczJhLW10bHMtcm9vdDAeFw0yMzA4MjIwMDMyMDRaFw00 +MzA4MjIwMDMyMDRaMDkxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTEdMBsGA1UE +AwwUdGVzdC1zMmEtbXRscy1zZXJ2ZXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQCMEzybsGPqfh92GLwy43mt8kQDF3ztr8y06RwU1hVnY7QqYK4obpvh +HkJVnTz9gwNBF3n5nUalqRzactlf2PCydN9oSYNCO8svVmo7vw1CleKAKFAiV5Qn +H76QlqD15oJreh7nSM8R4qj5KukIHvt0cN0gD6CJQzIURDtsKJwkW3yQjYyT/FAK +GYtFrB6buDn3Eg3Hsw6z7uj7CzLBsSl7BIGrQILbpbI9nFNT3rUTUhXZKY/3UtJA +Ob66AjTmMbD16RGYZR4JsPx6CstheifJ6YSI79r5KgD37zX0jMXFWimvb2SmZmFe +LoohtC8K7uTyjm/dROx6nHXdDt5TQYXHAgMBAAGjXzBdMBsGA1UdEQQUMBKHEAAA +AAAAAAAAAAAAAAAAAAAwHQYDVR0OBBYEFI3i2+tIk6YYn0MIxC0q93jk1VsUMB8G +A1UdIwQYMBaAFHIy2D/K61XyHDy86rSLzHR4FCLsMA0GCSqGSIb3DQEBCwUAA4IB +AQAUhk+s/lrIAULBbU7E22C8f93AzTxE1mhyHGNlfPPJP3t1Dl+h4X4WkFpkz5gT 
+EcNXB//Vvoq99HbEK5/92sxsIPexKdJBdcggeHXIgLDkOrEZEb0Nnh9eaAuU2QDn +JW44hMB+aF6mEaJvOHE6DRkQw3hwFYFisFKKHtlQ3TyOhw5CHGzSExPZusdSFNIe +2E7V/0QzGPJEFnEFUNe9N8nTH2P385Paoi+5+Iizlp/nztVXfzv0Cj/i+qGgtDUs +HB+gBU2wxMw8eYyuNzACH70wqGR1Parj8/JoyYhx0S4+Gjzy3JH3CcAMaxyfH/dI +4Wcvfz/isxgmH1UqIt3oc6ad +-----END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/testdata/mds_server_key.pem b/vendor/github.com/google/s2a-go/testdata/mds_server_key.pem new file mode 100644 index 0000000000000..b14ad0f724eef --- /dev/null +++ b/vendor/github.com/google/s2a-go/testdata/mds_server_key.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCMEzybsGPqfh92 +GLwy43mt8kQDF3ztr8y06RwU1hVnY7QqYK4obpvhHkJVnTz9gwNBF3n5nUalqRza +ctlf2PCydN9oSYNCO8svVmo7vw1CleKAKFAiV5QnH76QlqD15oJreh7nSM8R4qj5 +KukIHvt0cN0gD6CJQzIURDtsKJwkW3yQjYyT/FAKGYtFrB6buDn3Eg3Hsw6z7uj7 +CzLBsSl7BIGrQILbpbI9nFNT3rUTUhXZKY/3UtJAOb66AjTmMbD16RGYZR4JsPx6 +CstheifJ6YSI79r5KgD37zX0jMXFWimvb2SmZmFeLoohtC8K7uTyjm/dROx6nHXd +Dt5TQYXHAgMBAAECggEAIB5zGdIG/yh/Z1GBqfuOFaxFGx5iJ5BVlLAVH9P9IXFz +yPnVRXEjbinFlSMSbqEBeIX9EpcVMXxHIPIP1RIGEy2IYr3kiqXyT771ahDDZh6/ +Spqz0UQatSPqyvW3H9uE0Uc12dvQm23JSCUmPRX5m7gbhDQBIChXzdzdcU4Yi59V +4xmJUvbsAcLw5CBM6kwV+1NGVH9+3mUdhrr9M6B6+sVB/xnaqMGEDfQGiwL8U7EY +QOuc46KXu3Pd/qCdVLn60IrdjSzDJKeC5UZZ+ejNAo+DfbtOovBj3qu3OCUg4XVy +0CDBJ1sTdLvUfF4Gb+crjPsd+qBbXcjVfqdadwhsoQKBgQDBF1Pys/NitW8okJwp +2fiDIASP3TiI+MthWHGyuoZGPvmXQ3H6iuLSm8c/iYI2WPTf53Xff1VcFm1GmQms +GCsYM8Ax94zCeO6Ei1sYYxwcBloEZfOeV37MPA4pjJF4Lt+n5nveNxP+lrsjksJz +wToSEgWPDT1b/xcdt4/5j9J85wKBgQC5tiLx+33mwH4DoaFRmSl0+VuSNYFw6DTQ +SQ+kWqWGH4NENc9wf4Dj2VUZQhpXNhXVSxj+aP2d/ck1NrTJAWqYEXCDtFQOGSa2 +cGPRr+Fhy5NIEaEvR7IXcMBZzx3koYmWVBHricyrXs5FvHrT3N14mGDUG8n24U3f +R799bau0IQKBgQC97UM+lHCPJCWNggiJRgSifcje9VtZp1btjoBvq/bNe74nYkjn +htsrC91Fiu1Qpdlfr50K1IXSyaB886VG6JLjAGxI+dUzqJ38M9LLvxj0G+9JKjsi +AbAQFfZcOg8QZxLJZPVsE0MQhZTXndC06VhEVAOxvPUg214Sde8hK61/+wKBgCRw +O10VhnePT2pw/VEgZ0T/ZFtEylgYB7zSiRIrgwzVBBGPKVueePC8BPmGwdpYz2Hh +cU8B1Ll6QU+Co2hJMdwSl+wPpup5PuJPHRbYlrV0lzpt0x2OyL/WrLcyb2Ab3f40 +EqwPhqwdVwXR3JvTW1U9OMqFhVQ+kuP7lPQMX8NhAoGBAJOgZ7Tokipc4Mi68Olw +SCaOPvjjy4sW2rTRuKyjc1wTAzy7SJ3vXHfGkkN99nTLJFwAyJhWUpnRdwAXGi+x +gyOa95ImsEfRSwEjbluWfF8/P0IU8GR+ZTqT4NnNCOsi8T/xst4Szd1ECJNnnZDe +1ChfPP1AH+/75MJCvu6wQBQv +-----END PRIVATE KEY----- diff --git a/vendor/github.com/google/s2a-go/testdata/self_signed_cert.pem b/vendor/github.com/google/s2a-go/testdata/self_signed_cert.pem new file mode 100644 index 0000000000000..ad1bad5984598 --- /dev/null +++ b/vendor/github.com/google/s2a-go/testdata/self_signed_cert.pem @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDITCCAgkCFBS8mLoytMpMWBwpAtnRaq3eIKnsMA0GCSqGSIb3DQEBCwUAME0x +CzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTENMAsGA1UECgwEVGVzdDEiMCAGA1UE +AwwZdGVzdC1zMmEtbXRscy1zZWxmLXNpZ25lZDAeFw0yMzA4MjIyMTE2MDFaFw00 +MzA4MjIyMTE2MDFaME0xCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTENMAsGA1UE +CgwEVGVzdDEiMCAGA1UEAwwZdGVzdC1zMmEtbXRscy1zZWxmLXNpZ25lZDCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKFFPsYasKZeCFLEXl3RpE/ZOXFe +2lhutIalSpZvCmso+mQGoZ4cHK7At+kDjBi5CrnXkYcw7quQAhHgU0frhWdj7tsW +HUUtq7T8eaGWKBnVD9fl+MjtAl1BmhXwV9qRBbj4EesSKGDSGpKf66dOtzw83JbB +cU7XlPAH1c1zo2GXC1himcZ+SVGHVrOjn4NmeFs8g94/Dke8dWkHwv5YTMVugFK4 +5KxKgSOKkr4ka7PCBzgxCnW4wYSZNRHcxrqkiArO2HAQq0ACr7u+fVDYH//9mP2Z +ADo/zch7O5yhkiNbjXJIRrptDWEuVYMRloYDhT773h7bV/Q0Wo0NQGtasJ8CAwEA +ATANBgkqhkiG9w0BAQsFAAOCAQEAPjbH0TMyegF/MDvglkc0sXr6DqlmTxDCZZmG +lYPZ5Xy062+rxIHghMARbvO4BxepiG37KsP2agvOldm4TtU8nQ8LyswmSIFm4BQ+ 
+XQWwdsWyYyd8l0d5sXAdaN6AXwy50fvqCepmEqyreMY6dtLzlwo9gVCBFB7QuAPt +Nc14phpEUZt/KPNuY6cUlB7bz3tmnFbwxUrWj1p0KBEYsr7+KEVZxR+z0wtlU7S9 +ZBrmUvx0fq5Ef7JWtHW0w4ofg1op742sdYl+53C26GZ76ts4MmqVz2/94DScgRaU +gT0GLVuuCZXRDVeTXqTb4mditRCfzFPe9cCegYhGhSqBs8yh5A== +-----END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/testdata/self_signed_key.pem b/vendor/github.com/google/s2a-go/testdata/self_signed_key.pem new file mode 100644 index 0000000000000..bcf08e4f12f4b --- /dev/null +++ b/vendor/github.com/google/s2a-go/testdata/self_signed_key.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQChRT7GGrCmXghS +xF5d0aRP2TlxXtpYbrSGpUqWbwprKPpkBqGeHByuwLfpA4wYuQq515GHMO6rkAIR +4FNH64VnY+7bFh1FLau0/HmhligZ1Q/X5fjI7QJdQZoV8FfakQW4+BHrEihg0hqS +n+unTrc8PNyWwXFO15TwB9XNc6NhlwtYYpnGfklRh1azo5+DZnhbPIPePw5HvHVp +B8L+WEzFboBSuOSsSoEjipK+JGuzwgc4MQp1uMGEmTUR3Ma6pIgKzthwEKtAAq+7 +vn1Q2B///Zj9mQA6P83IezucoZIjW41ySEa6bQ1hLlWDEZaGA4U++94e21f0NFqN +DUBrWrCfAgMBAAECggEAR8e8YwyqJ8KezcgdgIC5M9kp2i4v3UCZFX0or8CI0J2S +pUbWVLuKgLXCpfIwPyjNf15Vpei/spkMcsx4BQDthdFTFSzIpmvni0z9DlD5VFYj +ESOJElV7wepbHPy2/c+izmuL/ic81aturGiFyRgeMq+cN3WuaztFTXkPTrzzsZGF +p/Mx3gqm7Hoc3d2xlv+8L5GjCtEJPlQgZJV+s3ennBjOAd8CC7d9qJetE3Er46pn +r5jedV3bQRZYBzmooYNHjbAs26++wYac/jTE0/U6nKS17eWq4BQZUtlMXUw5N81B +7LKn7C03rj2KCn+Nf5uin9ALmoy888LXCDdvL/NZkQKBgQDduv1Heu+tOZuNYUdQ +Hswmd8sVNAAWGZxdxixHMv58zrgbLFXSX6K89X2l5Sj9XON8TH46MuSFdjSwwWw5 +fBrhVEhA5srcqpvVWIBE05yqPpt0s1NQktMWJKELWlG8jOhVKwM5OYDpdxtwehpz +1g70XJz+nF/LTV8RdTK+OWDDpQKBgQC6MhdbGHUz/56dY3gZpE5TXnN2hkNbZCgk +emr6z85VHhQflZbedhCzB9PUnZnCKWOGQHQdxRTtRfd46LVboZqCdYO1ZNQv6toP +ysS7dTpZZFy7CpQaW0Y6/jS65jW6xIDKR1W40vgltZ3sfpG37JaowpzWdw2WuOnw +Bg0rcJAf8wKBgQCqE+p/z97UwuF8eufWnyj9QNo382E1koOMspv4KTdnyLETtthF +vDH6O1wbykG8xmmASLRyM+NyNA+KnXNETNvZh2q8zctBpGRQK8iIAsGjHM7ln0AD +B/x+ea5GJQuZU4RK/+lDFca6TjBwAFkWDVX/PqL18kDQkxKfM4SuwRhmOQKBgDGh +eoJIsa0LnP787Z2AI3Srf4F/ZmLs/ppCm1OBotEjdF+64v0nYWonUvqgi8SqfaHi +elEZIGvis4ViGj1zhRjzNAlc+AZRxpBhDzGcnNIJI4Kj3jhsTfsZmXqcNIQ1LtM8 +Uogyi/yZPaA1WKg7Aym2vlGYaGHdplXZdxc2KOSrAoGABRkD9l2OVcwK7RyNgFxo +mjxx0tfUdDBhHIi2igih1FiHpeP9E+4/kE/K7PnU9DoDrL1jW1MTpXaYV4seOylk +k9z/9QfcRa9ePD2N4FqbHWSYp5n3aLoIcGq/9jyjTwayZbbIhWO+vNuHE9wIvecZ +8x3gNkxJRb4NaLIoNzAhCoo= +-----END PRIVATE KEY----- diff --git a/vendor/github.com/google/uuid/CHANGELOG.md b/vendor/github.com/google/uuid/CHANGELOG.md index 2bd78667afbb3..7ec5ac7ea9090 100644 --- a/vendor/github.com/google/uuid/CHANGELOG.md +++ b/vendor/github.com/google/uuid/CHANGELOG.md @@ -1,5 +1,36 @@ # Changelog +## [1.6.0](https://github.com/google/uuid/compare/v1.5.0...v1.6.0) (2024-01-16) + + +### Features + +* add Max UUID constant ([#149](https://github.com/google/uuid/issues/149)) ([c58770e](https://github.com/google/uuid/commit/c58770eb495f55fe2ced6284f93c5158a62e53e3)) + + +### Bug Fixes + +* fix typo in version 7 uuid documentation ([#153](https://github.com/google/uuid/issues/153)) ([016b199](https://github.com/google/uuid/commit/016b199544692f745ffc8867b914129ecb47ef06)) +* Monotonicity in UUIDv7 ([#150](https://github.com/google/uuid/issues/150)) ([a2b2b32](https://github.com/google/uuid/commit/a2b2b32373ff0b1a312b7fdf6d38a977099698a6)) + +## [1.5.0](https://github.com/google/uuid/compare/v1.4.0...v1.5.0) (2023-12-12) + + +### Features + +* Validate UUID without creating new UUID ([#141](https://github.com/google/uuid/issues/141)) ([9ee7366](https://github.com/google/uuid/commit/9ee7366e66c9ad96bab89139418a713dc584ae29)) + +## 
[1.4.0](https://github.com/google/uuid/compare/v1.3.1...v1.4.0) (2023-10-26) + + +### Features + +* UUIDs slice type with Strings() convenience method ([#133](https://github.com/google/uuid/issues/133)) ([cd5fbbd](https://github.com/google/uuid/commit/cd5fbbdd02f3e3467ac18940e07e062be1f864b4)) + +### Fixes + +* Clarify that Parse's job is to parse but not necessarily validate strings. (Documents current behavior) + ## [1.3.1](https://github.com/google/uuid/compare/v1.3.0...v1.3.1) (2023-08-18) diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md index 5566888726d98..a502fdc515ac9 100644 --- a/vendor/github.com/google/uuid/CONTRIBUTING.md +++ b/vendor/github.com/google/uuid/CONTRIBUTING.md @@ -11,7 +11,7 @@ please explain why in the pull request description. ### Releasing -Commits that would precipitate a SemVer change, as desrcibed in the Conventional +Commits that would precipitate a SemVer change, as described in the Conventional Commits Specification, will trigger [`release-please`](https://github.com/google-github-actions/release-please-action) to create a release candidate pull request. Once submitted, `release-please` will create a release. diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go index b404f4bec2742..dc60082d3b3b1 100644 --- a/vendor/github.com/google/uuid/hash.go +++ b/vendor/github.com/google/uuid/hash.go @@ -17,6 +17,12 @@ var ( NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) Nil UUID // empty UUID, all zeros + + // The Max UUID is special form of UUID that is specified to have all 128 bits set to 1. + Max = UUID{ + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + } ) // NewHash returns a new UUID derived from the hash of space concatenated with diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go index e6ef06cdc87aa..c351129279f32 100644 --- a/vendor/github.com/google/uuid/time.go +++ b/vendor/github.com/google/uuid/time.go @@ -108,12 +108,23 @@ func setClockSequence(seq int) { } // Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in -// uuid. The time is only defined for version 1 and 2 UUIDs. +// uuid. The time is only defined for version 1, 2, 6 and 7 UUIDs. func (uuid UUID) Time() Time { - time := int64(binary.BigEndian.Uint32(uuid[0:4])) - time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 - time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 - return Time(time) + var t Time + switch uuid.Version() { + case 6: + time := binary.BigEndian.Uint64(uuid[:8]) // Ignore uuid[6] version b0110 + t = Time(time) + case 7: + time := binary.BigEndian.Uint64(uuid[:8]) + t = Time((time>>16)*10000 + g1582ns100) + default: // forward compatible + time := int64(binary.BigEndian.Uint32(uuid[0:4])) + time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 + time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 + t = Time(time) + } + return t } // ClockSequence returns the clock sequence encoded in uuid. diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go index a56138cc4bd04..5232b486780d6 100644 --- a/vendor/github.com/google/uuid/uuid.go +++ b/vendor/github.com/google/uuid/uuid.go @@ -56,11 +56,15 @@ func IsInvalidLengthError(err error) bool { return ok } -// Parse decodes s into a UUID or returns an error. 
Both the standard UUID -// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and -// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the -// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex -// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx. +// Parse decodes s into a UUID or returns an error if it cannot be parsed. Both +// the standard UUID forms defined in RFC 4122 +// (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx) are decoded. In addition, +// Parse accepts non-standard strings such as the raw hex encoding +// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx and 38 byte "Microsoft style" encodings, +// e.g. {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}. Only the middle 36 bytes are +// examined in the latter case. Parse should not be used to validate strings as +// it parses non-standard encodings as indicated above. func Parse(s string) (UUID, error) { var uuid UUID switch len(s) { @@ -182,6 +186,59 @@ func Must(uuid UUID, err error) UUID { return uuid } +// Validate returns an error if s is not a properly formatted UUID in one of the following formats: +// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx +// {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} +// It returns an error if the format is invalid, otherwise nil. +func Validate(s string) error { + switch len(s) { + // Standard UUID format + case 36: + + // UUID with "urn:uuid:" prefix + case 36 + 9: + if !strings.EqualFold(s[:9], "urn:uuid:") { + return fmt.Errorf("invalid urn prefix: %q", s[:9]) + } + s = s[9:] + + // UUID enclosed in braces + case 36 + 2: + if s[0] != '{' || s[len(s)-1] != '}' { + return fmt.Errorf("invalid bracketed UUID format") + } + s = s[1 : len(s)-1] + + // UUID without hyphens + case 32: + for i := 0; i < len(s); i += 2 { + _, ok := xtob(s[i], s[i+1]) + if !ok { + return errors.New("invalid UUID format") + } + } + + default: + return invalidLengthError{len(s)} + } + + // Check for standard UUID format + if len(s) == 36 { + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return errors.New("invalid UUID format") + } + for _, x := range []int{0, 2, 4, 6, 9, 11, 14, 16, 19, 21, 24, 26, 28, 30, 32, 34} { + if _, ok := xtob(s[x], s[x+1]); !ok { + return errors.New("invalid UUID format") + } + } + } + + return nil +} + // String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx // , or "" if uuid is invalid. func (uuid UUID) String() string { @@ -294,3 +351,15 @@ func DisableRandPool() { poolMu.Lock() poolPos = randPoolSize } + +// UUIDs is a slice of UUID types. +type UUIDs []UUID + +// Strings returns a string slice containing the string form of each UUID in uuids. +func (uuids UUIDs) Strings() []string { + var uuidStrs = make([]string, len(uuids)) + for i, uuid := range uuids { + uuidStrs[i] = uuid.String() + } + return uuidStrs +} diff --git a/vendor/github.com/google/uuid/version6.go b/vendor/github.com/google/uuid/version6.go new file mode 100644 index 0000000000000..339a959a7a262 --- /dev/null +++ b/vendor/github.com/google/uuid/version6.go @@ -0,0 +1,56 @@ +// Copyright 2023 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "encoding/binary" + +// UUID version 6 is a field-compatible version of UUIDv1, reordered for improved DB locality. 
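As a quick illustration of the uuid additions above (Validate, the UUIDs slice type, and the Max constant), a small hedged sketch; the sample strings are arbitrary.

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Validate checks the accepted formats without constructing a UUID value.
	for _, s := range []string{
		"f47ac10b-58cc-4372-8567-0e02b2c3d479",
		"urn:uuid:f47ac10b-58cc-4372-8567-0e02b2c3d479",
		"{f47ac10b-58cc-4372-8567-0e02b2c3d479}",
		"not-a-uuid",
	} {
		fmt.Printf("%q -> valid=%v\n", s, uuid.Validate(s) == nil)
	}

	// UUIDs.Strings converts a whole slice in one call.
	ids := uuid.UUIDs{uuid.New(), uuid.Max}
	fmt.Println(ids.Strings())
}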
+// It is expected that UUIDv6 will primarily be used in contexts where there are existing v1 UUIDs. +// Systems that do not involve legacy UUIDv1 SHOULD consider using UUIDv7 instead. +// +// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#uuidv6 +// +// NewV6 returns a Version 6 UUID based on the current NodeID and clock +// sequence, and the current time. If the NodeID has not been set by SetNodeID +// or SetNodeInterface then it will be set automatically. If the NodeID cannot +// be set NewV6 set NodeID is random bits automatically . If clock sequence has not been set by +// SetClockSequence then it will be set automatically. If GetTime fails to +// return the current NewV6 returns Nil and an error. +func NewV6() (UUID, error) { + var uuid UUID + now, seq, err := GetTime() + if err != nil { + return uuid, err + } + + /* + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | time_high | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | time_mid | time_low_and_version | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + |clk_seq_hi_res | clk_seq_low | node (0-1) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | node (2-5) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + */ + + binary.BigEndian.PutUint64(uuid[0:], uint64(now)) + binary.BigEndian.PutUint16(uuid[8:], seq) + + uuid[6] = 0x60 | (uuid[6] & 0x0F) + uuid[8] = 0x80 | (uuid[8] & 0x3F) + + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } + copy(uuid[10:], nodeID[:]) + nodeMu.Unlock() + + return uuid, nil +} diff --git a/vendor/github.com/google/uuid/version7.go b/vendor/github.com/google/uuid/version7.go new file mode 100644 index 0000000000000..3167b643d4594 --- /dev/null +++ b/vendor/github.com/google/uuid/version7.go @@ -0,0 +1,104 @@ +// Copyright 2023 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "io" +) + +// UUID version 7 features a time-ordered value field derived from the widely +// implemented and well known Unix Epoch timestamp source, +// the number of milliseconds seconds since midnight 1 Jan 1970 UTC, leap seconds excluded. +// As well as improved entropy characteristics over versions 1 or 6. +// +// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#name-uuid-version-7 +// +// Implementations SHOULD utilize UUID version 7 over UUID version 1 and 6 if possible. +// +// NewV7 returns a Version 7 UUID based on the current time(Unix Epoch). +// Uses the randomness pool if it was enabled with EnableRandPool. +// On error, NewV7 returns Nil and an error +func NewV7() (UUID, error) { + uuid, err := NewRandom() + if err != nil { + return uuid, err + } + makeV7(uuid[:]) + return uuid, nil +} + +// NewV7FromReader returns a Version 7 UUID based on the current time(Unix Epoch). +// it use NewRandomFromReader fill random bits. +// On error, NewV7FromReader returns Nil and an error. 
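And a hedged sketch of the new version 6 and version 7 constructors together with the extended Time() support from the time.go hunk above; error handling is abbreviated and output values will differ per run.

package main

import (
	"fmt"
	"log"

	"github.com/google/uuid"
)

func main() {
	v6, err := uuid.NewV6()
	if err != nil {
		log.Fatal(err)
	}
	v7, err := uuid.NewV7()
	if err != nil {
		log.Fatal(err)
	}

	// Time() now decodes the embedded timestamp for versions 6 and 7 as well
	// as the classic versions 1 and 2.
	sec, nsec := v7.Time().UnixTime()
	fmt.Printf("v6=%s (version %d)\n", v6, v6.Version())
	fmt.Printf("v7=%s created around unix %d.%09d\n", v7, sec, nsec)
}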
+func NewV7FromReader(r io.Reader) (UUID, error) { + uuid, err := NewRandomFromReader(r) + if err != nil { + return uuid, err + } + + makeV7(uuid[:]) + return uuid, nil +} + +// makeV7 fill 48 bits time (uuid[0] - uuid[5]), set version b0111 (uuid[6]) +// uuid[8] already has the right version number (Variant is 10) +// see function NewV7 and NewV7FromReader +func makeV7(uuid []byte) { + /* + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | unix_ts_ms | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | unix_ts_ms | ver | rand_a (12 bit seq) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + |var| rand_b | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | rand_b | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + */ + _ = uuid[15] // bounds check + + t, s := getV7Time() + + uuid[0] = byte(t >> 40) + uuid[1] = byte(t >> 32) + uuid[2] = byte(t >> 24) + uuid[3] = byte(t >> 16) + uuid[4] = byte(t >> 8) + uuid[5] = byte(t) + + uuid[6] = 0x70 | (0x0F & byte(s>>8)) + uuid[7] = byte(s) +} + +// lastV7time is the last time we returned stored as: +// +// 52 bits of time in milliseconds since epoch +// 12 bits of (fractional nanoseconds) >> 8 +var lastV7time int64 + +const nanoPerMilli = 1000000 + +// getV7Time returns the time in milliseconds and nanoseconds / 256. +// The returned (milli << 12 + seq) is guarenteed to be greater than +// (milli << 12 + seq) returned by any previous call to getV7Time. +func getV7Time() (milli, seq int64) { + timeMu.Lock() + defer timeMu.Unlock() + + nano := timeNow().UnixNano() + milli = nano / nanoPerMilli + // Sequence number is between 0 and 3906 (nanoPerMilli>>8) + seq = (nano - milli*nanoPerMilli) >> 8 + now := milli<<12 + seq + if now <= lastV7time { + now = lastV7time + 1 + milli = now >> 12 + seq = now & 0xfff + } + lastV7time = now + return milli, seq +} diff --git a/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go b/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go index b3283b8158837..ea5beb5aa7687 100644 --- a/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go +++ b/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go @@ -35,6 +35,8 @@ import ( const signAPI = "EnterpriseCertSigner.Sign" const certificateChainAPI = "EnterpriseCertSigner.CertificateChain" const publicKeyAPI = "EnterpriseCertSigner.Public" +const encryptAPI = "EnterpriseCertSigner.Encrypt" +const decryptAPI = "EnterpriseCertSigner.Decrypt" // A Connection wraps a pair of unidirectional streams as an io.ReadWriteCloser. type Connection struct { @@ -54,13 +56,28 @@ func (c *Connection) Close() error { func init() { gob.Register(crypto.SHA256) + gob.Register(crypto.SHA384) + gob.Register(crypto.SHA512) gob.Register(&rsa.PSSOptions{}) + gob.Register(&rsa.OAEPOptions{}) } -// SignArgs contains arguments to a crypto Signer.Sign method. +// SignArgs contains arguments for a Sign API call. type SignArgs struct { Digest []byte // The content to sign. - Opts crypto.SignerOpts // Options for signing, such as Hash identifier. + Opts crypto.SignerOpts // Options for signing. Must implement HashFunc(). +} + +// EncryptArgs contains arguments for an Encrypt API call. +type EncryptArgs struct { + Plaintext []byte // The plaintext to encrypt. + Opts any // Options for encryption. Ex: an instance of crypto.Hash. 
+} + +// DecryptArgs contains arguments to for a Decrypt API call. +type DecryptArgs struct { + Ciphertext []byte // The ciphertext to decrypt. + Opts crypto.DecrypterOpts // Options for decryption. Ex: an instance of *rsa.OAEPOptions. } // Key implements credential.Credential by holding the executed signer subprocess. @@ -98,7 +115,7 @@ func (k *Key) Public() crypto.PublicKey { return k.publicKey } -// Sign signs a message digest, using the specified signer options. +// Sign signs a message digest, using the specified signer opts. Implements crypto.Signer interface. func (k *Key) Sign(_ io.Reader, digest []byte, opts crypto.SignerOpts) (signed []byte, err error) { if opts != nil && opts.HashFunc() != 0 && len(digest) != opts.HashFunc().Size() { return nil, fmt.Errorf("Digest length of %v bytes does not match Hash function size of %v bytes", len(digest), opts.HashFunc().Size()) @@ -107,6 +124,18 @@ func (k *Key) Sign(_ io.Reader, digest []byte, opts crypto.SignerOpts) (signed [ return } +// Encrypt encrypts a plaintext msg into ciphertext, using the specified encrypt opts. +func (k *Key) Encrypt(_ io.Reader, msg []byte, opts any) (ciphertext []byte, err error) { + err = k.client.Call(encryptAPI, EncryptArgs{Plaintext: msg, Opts: opts}, &ciphertext) + return +} + +// Decrypt decrypts a ciphertext msg into plaintext, using the specified decrypter opts. Implements crypto.Decrypter interface. +func (k *Key) Decrypt(_ io.Reader, msg []byte, opts crypto.DecrypterOpts) (plaintext []byte, err error) { + err = k.client.Call(decryptAPI, DecryptArgs{Ciphertext: msg, Opts: opts}, &plaintext) + return +} + // ErrCredUnavailable is a sentinel error that indicates ECP Cred is unavailable, // possibly due to missing config or missing binary path. var ErrCredUnavailable = errors.New("Cred is unavailable") @@ -120,7 +149,12 @@ var ErrCredUnavailable = errors.New("Cred is unavailable") // The config file also specifies which certificate the signer should use. 
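A hedged sketch of the new Encrypt/Decrypt round trip on client.Key, assuming an enterprise-certificate-proxy signer is configured for an RSA key. The option values follow the examples in the struct comments above; paths and plaintext are illustrative. As the hunks below add, an empty config path also falls back to the GOOGLE_API_CERTIFICATE_CONFIG environment variable before the platform default location.

package main

import (
	"crypto"
	"crypto/rsa"
	"fmt"
	"log"

	"github.com/googleapis/enterprise-certificate-proxy/client"
)

func main() {
	// Empty path: Cred resolves the config from GOOGLE_API_CERTIFICATE_CONFIG
	// or the platform default location (assumes a signer binary is set up).
	key, err := client.Cred("")
	if err != nil {
		log.Fatalf("loading ECP credential: %v", err)
	}
	defer key.Close()

	// Encrypt takes the hash as its options (see EncryptArgs above) ...
	ciphertext, err := key.Encrypt(nil, []byte("hello"), crypto.SHA256)
	if err != nil {
		log.Fatalf("encrypt: %v", err)
	}
	// ... while Decrypt implements crypto.Decrypter and takes DecrypterOpts.
	plaintext, err := key.Decrypt(nil, ciphertext, &rsa.OAEPOptions{Hash: crypto.SHA256})
	if err != nil {
		log.Fatalf("decrypt: %v", err)
	}
	fmt.Printf("round trip: %s\n", plaintext)
}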
func Cred(configFilePath string) (*Key, error) { if configFilePath == "" { - configFilePath = util.GetDefaultConfigFilePath() + envFilePath := util.GetConfigFilePathFromEnv() + if envFilePath != "" { + configFilePath = envFilePath + } else { + configFilePath = util.GetDefaultConfigFilePath() + } } enterpriseCertSignerPath, err := util.LoadSignerBinaryPath(configFilePath) if err != nil { diff --git a/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go b/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go index 1640ec1c9e31a..f374a7f55f470 100644 --- a/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go +++ b/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go @@ -22,6 +22,7 @@ import ( "os/user" "path/filepath" "runtime" + "strings" ) const configFileName = "certificate_config.json" @@ -63,6 +64,9 @@ func LoadSignerBinaryPath(configFilePath string) (path string, err error) { if signerBinaryPath == "" { return "", ErrConfigUnavailable } + + signerBinaryPath = strings.ReplaceAll(signerBinaryPath, "~", guessHomeDir()) + signerBinaryPath = strings.ReplaceAll(signerBinaryPath, "$HOME", guessHomeDir()) return signerBinaryPath, nil } @@ -89,3 +93,8 @@ func getDefaultConfigFileDirectory() (directory string) { func GetDefaultConfigFilePath() (path string) { return filepath.Join(getDefaultConfigFileDirectory(), configFileName) } + +// GetConfigFilePathFromEnv returns the path associated with environment variable GOOGLE_API_CERTIFICATE_CONFIG +func GetConfigFilePathFromEnv() (path string) { + return os.Getenv("GOOGLE_API_CERTIFICATE_CONFIG") +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/expvar_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/expvar_collector.go index 3aa8d0590ba5e..b22d862fbc639 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/collectors/expvar_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/collectors/expvar_collector.go @@ -22,7 +22,7 @@ import "github.com/prometheus/client_golang/prometheus" // Prometheus metrics. Note that the data models of expvar and Prometheus are // fundamentally different, and that the expvar Collector is inherently slower // than native Prometheus metrics. Thus, the expvar Collector is probably great -// for experiments and prototying, but you should seriously consider a more +// for experiments and prototyping, but you should seriously consider a more // direct implementation of Prometheus metrics for monitoring production // systems. // diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go index 2f5616894e7d1..bcfa4fa10e014 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go @@ -132,16 +132,19 @@ type GoCollectionOption uint32 const ( // GoRuntimeMemStatsCollection represents the metrics represented by runtime.MemStats structure. - // Deprecated. Use WithGoCollectorMemStatsMetricsDisabled() function to disable those metrics in the collector. + // + // Deprecated: Use WithGoCollectorMemStatsMetricsDisabled() function to disable those metrics in the collector. 
GoRuntimeMemStatsCollection GoCollectionOption = 1 << iota // GoRuntimeMetricsCollection is the new set of metrics represented by runtime/metrics package. - // Deprecated. Use WithGoCollectorRuntimeMetrics(GoRuntimeMetricsRule{Matcher: regexp.MustCompile("/.*")}) + // + // Deprecated: Use WithGoCollectorRuntimeMetrics(GoRuntimeMetricsRule{Matcher: regexp.MustCompile("/.*")}) // function to enable those metrics in the collector. GoRuntimeMetricsCollection ) // WithGoCollections allows enabling different collections for Go collector on top of base metrics. -// Deprecated. Use WithGoCollectorRuntimeMetrics() and WithGoCollectorMemStatsMetricsDisabled() instead to control metrics. +// +// Deprecated: Use WithGoCollectorRuntimeMetrics() and WithGoCollectorMemStatsMetricsDisabled() instead to control metrics. func WithGoCollections(flags GoCollectionOption) func(options *internal.GoCollectorOptions) { return func(options *internal.GoCollectorOptions) { if flags&GoRuntimeMemStatsCollection == 0 { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go index 62de4dc59aaec..4ce84e7a80e50 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/counter.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go @@ -20,6 +20,7 @@ import ( "time" dto "github.com/prometheus/client_model/go" + "google.golang.org/protobuf/types/known/timestamppb" ) // Counter is a Metric that represents a single numerical value that only ever @@ -66,7 +67,7 @@ type CounterVecOpts struct { CounterOpts // VariableLabels are used to partition the metric vector by the given set - // of labels. Each label value will be constrained with the optional Contraint + // of labels. Each label value will be constrained with the optional Constraint // function, if provided. VariableLabels ConstrainableLabels } @@ -90,8 +91,12 @@ func NewCounter(opts CounterOpts) Counter { nil, opts.ConstLabels, ) - result := &counter{desc: desc, labelPairs: desc.constLabelPairs, now: time.Now} + if opts.now == nil { + opts.now = time.Now + } + result := &counter{desc: desc, labelPairs: desc.constLabelPairs, now: opts.now} result.init(result) // Init self-collection. + result.createdTs = timestamppb.New(opts.now()) return result } @@ -106,10 +111,12 @@ type counter struct { selfCollector desc *Desc + createdTs *timestamppb.Timestamp labelPairs []*dto.LabelPair exemplar atomic.Value // Containing nil or a *dto.Exemplar. - now func() time.Time // To mock out time.Now() for testing. + // now is for testing purposes, by default it's time.Now. 
+ now func() time.Time } func (c *counter) Desc() *Desc { @@ -159,8 +166,7 @@ func (c *counter) Write(out *dto.Metric) error { exemplar = e.(*dto.Exemplar) } val := c.get() - - return populateMetric(CounterValue, val, c.labelPairs, exemplar, out) + return populateMetric(CounterValue, val, c.labelPairs, exemplar, out, c.createdTs) } func (c *counter) updateExemplar(v float64, l Labels) { @@ -200,13 +206,17 @@ func (v2) NewCounterVec(opts CounterVecOpts) *CounterVec { opts.VariableLabels, opts.ConstLabels, ) + if opts.now == nil { + opts.now = time.Now + } return &CounterVec{ MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { - if len(lvs) != len(desc.variableLabels) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), lvs)) + if len(lvs) != len(desc.variableLabels.names) { + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, lvs)) } - result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: time.Now} + result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: opts.now} result.init(result) // Init self-collection. + result.createdTs = timestamppb.New(opts.now()) return result }), } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go index deedc2dfbe756..68ffe3c248077 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go @@ -52,7 +52,7 @@ type Desc struct { constLabelPairs []*dto.LabelPair // variableLabels contains names of labels and normalization function for // which the metric maintains variable values. - variableLabels ConstrainedLabels + variableLabels *compiledLabels // id is a hash of the values of the ConstLabels and fqName. This // must be unique among all registered descriptors and can therefore be // used as an identifier of the descriptor. @@ -93,7 +93,7 @@ func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, const d := &Desc{ fqName: fqName, help: help, - variableLabels: variableLabels.constrainedLabels(), + variableLabels: variableLabels.compile(), } if !model.IsValidMetricName(model.LabelValue(fqName)) { d.err = fmt.Errorf("%q is not a valid metric name", fqName) @@ -103,7 +103,7 @@ func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, const // their sorted label names) plus the fqName (at position 0). labelValues := make([]string, 1, len(constLabels)+1) labelValues[0] = fqName - labelNames := make([]string, 0, len(constLabels)+len(d.variableLabels)) + labelNames := make([]string, 0, len(constLabels)+len(d.variableLabels.names)) labelNameSet := map[string]struct{}{} // First add only the const label names and sort them... for labelName := range constLabels { @@ -128,13 +128,13 @@ func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, const // Now add the variable label names, but prefix them with something that // cannot be in a regular label name. That prevents matching the label // dimension with a different mix between preset and variable labels. 
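The counter hunks above thread a created timestamp from the (optionally injectable) clock into the exported metric. A minimal, hedged sketch of observing it via Write, with an illustrative metric name and assuming a client_model version that carries the created-timestamp field:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
)

func main() {
	c := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "demo_requests_total",
		Help: "Example counter.",
	})
	c.Add(3)

	var m dto.Metric
	if err := c.Write(&m); err != nil {
		log.Fatal(err)
	}
	// With this vendor bump the counter reports when it was created alongside
	// its value, which scrapers can use for created-timestamp handling.
	fmt.Println("value:  ", m.GetCounter().GetValue())
	fmt.Println("created:", m.GetCounter().GetCreatedTimestamp().AsTime())
}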
- for _, label := range d.variableLabels { - if !checkLabelName(label.Name) { - d.err = fmt.Errorf("%q is not a valid label name for metric %q", label.Name, fqName) + for _, label := range d.variableLabels.names { + if !checkLabelName(label) { + d.err = fmt.Errorf("%q is not a valid label name for metric %q", label, fqName) return d } - labelNames = append(labelNames, "$"+label.Name) - labelNameSet[label.Name] = struct{}{} + labelNames = append(labelNames, "$"+label) + labelNameSet[label] = struct{}{} } if len(labelNames) != len(labelNameSet) { d.err = fmt.Errorf("duplicate label names in constant and variable labels for metric %q", fqName) @@ -189,11 +189,19 @@ func (d *Desc) String() string { fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()), ) } + vlStrings := make([]string, 0, len(d.variableLabels.names)) + for _, vl := range d.variableLabels.names { + if fn, ok := d.variableLabels.labelConstraints[vl]; ok && fn != nil { + vlStrings = append(vlStrings, fmt.Sprintf("c(%s)", vl)) + } else { + vlStrings = append(vlStrings, vl) + } + } return fmt.Sprintf( - "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}", + "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: {%s}}", d.fqName, d.help, strings.Join(lpStrings, ","), - d.variableLabels, + strings.Join(vlStrings, ","), ) } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go index c41ab37f3bb9b..de5a856293180 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go @@ -48,7 +48,7 @@ func (e *expvarCollector) Collect(ch chan<- Metric) { continue } var v interface{} - labels := make([]string, len(desc.variableLabels)) + labels := make([]string, len(desc.variableLabels.names)) if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil { ch <- NewInvalidMetric(desc, err) continue diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go index f1ea6c76f7564..dd2eac940675b 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go @@ -62,7 +62,7 @@ type GaugeVecOpts struct { GaugeOpts // VariableLabels are used to partition the metric vector by the given set - // of labels. Each label value will be constrained with the optional Contraint + // of labels. Each label value will be constrained with the optional Constraint // function, if provided. 
VariableLabels ConstrainableLabels } @@ -135,7 +135,7 @@ func (g *gauge) Sub(val float64) { func (g *gauge) Write(out *dto.Metric) error { val := math.Float64frombits(atomic.LoadUint64(&g.valBits)) - return populateMetric(GaugeValue, val, g.labelPairs, nil, out) + return populateMetric(GaugeValue, val, g.labelPairs, nil, out, nil) } // GaugeVec is a Collector that bundles a set of Gauges that all share the same @@ -166,8 +166,8 @@ func (v2) NewGaugeVec(opts GaugeVecOpts) *GaugeVec { ) return &GaugeVec{ MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { - if len(lvs) != len(desc.variableLabels) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), lvs)) + if len(lvs) != len(desc.variableLabels.names) { + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, lvs)) } result := &gauge{desc: desc, labelPairs: MakeLabelPairs(desc, lvs)} result.init(result) // Init self-collection. diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go index 8d818afe90d79..1feba62c6c930 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -25,6 +25,7 @@ import ( dto "github.com/prometheus/client_model/go" "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/timestamppb" ) // nativeHistogramBounds for the frac of observed values. Only relevant for @@ -391,7 +392,7 @@ type HistogramOpts struct { // zero, it is replaced by default buckets. The default buckets are // DefBuckets if no buckets for a native histogram (see below) are used, // otherwise the default is no buckets. (In other words, if you want to - // use both reguler buckets and buckets for a native histogram, you have + // use both regular buckets and buckets for a native histogram, you have // to define the regular buckets here explicitly.) Buckets []float64 @@ -413,8 +414,8 @@ type HistogramOpts struct { // and 2, same as between 2 and 4, and 4 and 8, etc.). // // Details about the actually used factor: The factor is calculated as - // 2^(2^n), where n is an integer number between (and including) -8 and - // 4. n is chosen so that the resulting factor is the largest that is + // 2^(2^-n), where n is an integer number between (and including) -4 and + // 8. n is chosen so that the resulting factor is the largest that is // still smaller or equal to NativeHistogramBucketFactor. Note that the // smallest possible factor is therefore approx. 1.00271 (i.e. 2^(2^-8) // ). If NativeHistogramBucketFactor is greater than 1 but smaller than @@ -428,12 +429,12 @@ type HistogramOpts struct { // a major version bump. NativeHistogramBucketFactor float64 // All observations with an absolute value of less or equal - // NativeHistogramZeroThreshold are accumulated into a “zero” - // bucket. For best results, this should be close to a bucket - // boundary. This is usually the case if picking a power of two. If + // NativeHistogramZeroThreshold are accumulated into a “zero” bucket. + // For best results, this should be close to a bucket boundary. This is + // usually the case if picking a power of two. If // NativeHistogramZeroThreshold is left at zero, - // DefNativeHistogramZeroThreshold is used as the threshold. To configure - // a zero bucket with an actual threshold of zero (i.e. only + // DefNativeHistogramZeroThreshold is used as the threshold. 
To + // configure a zero bucket with an actual threshold of zero (i.e. only // observations of precisely zero will go into the zero bucket), set // NativeHistogramZeroThreshold to the NativeHistogramZeroThresholdZero // constant (or any negative float value). @@ -446,26 +447,34 @@ type HistogramOpts struct { // Histogram are sufficiently wide-spread. In particular, this could be // used as a DoS attack vector. Where the observed values depend on // external inputs, it is highly recommended to set a - // NativeHistogramMaxBucketNumber.) Once the set + // NativeHistogramMaxBucketNumber.) Once the set // NativeHistogramMaxBucketNumber is exceeded, the following strategy is - // enacted: First, if the last reset (or the creation) of the histogram - // is at least NativeHistogramMinResetDuration ago, then the whole - // histogram is reset to its initial state (including regular - // buckets). If less time has passed, or if - // NativeHistogramMinResetDuration is zero, no reset is - // performed. Instead, the zero threshold is increased sufficiently to - // reduce the number of buckets to or below - // NativeHistogramMaxBucketNumber, but not to more than - // NativeHistogramMaxZeroThreshold. Thus, if - // NativeHistogramMaxZeroThreshold is already at or below the current - // zero threshold, nothing happens at this step. After that, if the - // number of buckets still exceeds NativeHistogramMaxBucketNumber, the - // resolution of the histogram is reduced by doubling the width of the - // sparse buckets (up to a growth factor between one bucket to the next - // of 2^(2^4) = 65536, see above). + // enacted: + // - First, if the last reset (or the creation) of the histogram is at + // least NativeHistogramMinResetDuration ago, then the whole + // histogram is reset to its initial state (including regular + // buckets). + // - If less time has passed, or if NativeHistogramMinResetDuration is + // zero, no reset is performed. Instead, the zero threshold is + // increased sufficiently to reduce the number of buckets to or below + // NativeHistogramMaxBucketNumber, but not to more than + // NativeHistogramMaxZeroThreshold. Thus, if + // NativeHistogramMaxZeroThreshold is already at or below the current + // zero threshold, nothing happens at this step. + // - After that, if the number of buckets still exceeds + // NativeHistogramMaxBucketNumber, the resolution of the histogram is + // reduced by doubling the width of the sparse buckets (up to a + // growth factor between one bucket to the next of 2^(2^4) = 65536, + // see above). + // - Any increased zero threshold or reduced resolution is reset back + // to their original values once NativeHistogramMinResetDuration has + // passed (since the last reset or the creation of the histogram). NativeHistogramMaxBucketNumber uint32 NativeHistogramMinResetDuration time.Duration NativeHistogramMaxZeroThreshold float64 + + // now is for testing purposes, by default it's time.Now. + now func() time.Time } // HistogramVecOpts bundles the options to create a HistogramVec metric. @@ -475,7 +484,7 @@ type HistogramVecOpts struct { HistogramOpts // VariableLabels are used to partition the metric vector by the given set - // of labels. Each label value will be constrained with the optional Contraint + // of labels. Each label value will be constrained with the optional Constraint // function, if provided. 
VariableLabels ConstrainableLabels } @@ -499,12 +508,12 @@ func NewHistogram(opts HistogramOpts) Histogram { } func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram { - if len(desc.variableLabels) != len(labelValues) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), labelValues)) + if len(desc.variableLabels.names) != len(labelValues) { + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, labelValues)) } - for _, n := range desc.variableLabels { - if n.Name == bucketLabel { + for _, n := range desc.variableLabels.names { + if n == bucketLabel { panic(errBucketLabelNotAllowed) } } @@ -514,6 +523,10 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr } } + if opts.now == nil { + opts.now = time.Now + } + h := &histogram{ desc: desc, upperBounds: opts.Buckets, @@ -521,8 +534,8 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr nativeHistogramMaxBuckets: opts.NativeHistogramMaxBucketNumber, nativeHistogramMaxZeroThreshold: opts.NativeHistogramMaxZeroThreshold, nativeHistogramMinResetDuration: opts.NativeHistogramMinResetDuration, - lastResetTime: time.Now(), - now: time.Now, + lastResetTime: opts.now(), + now: opts.now, } if len(h.upperBounds) == 0 && opts.NativeHistogramBucketFactor <= 1 { h.upperBounds = DefBuckets @@ -701,9 +714,11 @@ type histogram struct { nativeHistogramMaxZeroThreshold float64 nativeHistogramMaxBuckets uint32 nativeHistogramMinResetDuration time.Duration - lastResetTime time.Time // Protected by mtx. + // lastResetTime is protected by mtx. It is also used as created timestamp. + lastResetTime time.Time - now func() time.Time // To mock out time.Now() for testing. + // now is for testing purposes, by default it's time.Now. + now func() time.Time } func (h *histogram) Desc() *Desc { @@ -742,9 +757,10 @@ func (h *histogram) Write(out *dto.Metric) error { waitForCooldown(count, coldCounts) his := &dto.Histogram{ - Bucket: make([]*dto.Bucket, len(h.upperBounds)), - SampleCount: proto.Uint64(count), - SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), + Bucket: make([]*dto.Bucket, len(h.upperBounds)), + SampleCount: proto.Uint64(count), + SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), + CreatedTimestamp: timestamppb.New(h.lastResetTime), } out.Histogram = his out.Label = h.labelPairs @@ -782,6 +798,16 @@ func (h *histogram) Write(out *dto.Metric) error { his.ZeroCount = proto.Uint64(zeroBucket) his.NegativeSpan, his.NegativeDelta = makeBuckets(&coldCounts.nativeHistogramBucketsNegative) his.PositiveSpan, his.PositiveDelta = makeBuckets(&coldCounts.nativeHistogramBucketsPositive) + + // Add a no-op span to a histogram without observations and with + // a zero threshold of zero. Otherwise, a native histogram would + // look like a classic histogram to scrapers. 
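Stepping back from the Write() hunk for a moment: the reworked NativeHistogram* option comments above are easier to digest with a short, hedged sketch of a native histogram configured with a bucket cap and reset window. Names, values, and the listen address are purely illustrative, not recommendations.

package main

import (
	"log"
	"math/rand"
	"net/http"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	h := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name: "demo_request_duration_seconds",
		Help: "Example native histogram.",
		// Bucket width grows by at most ~10% per bucket (factor 2^(2^-n)).
		NativeHistogramBucketFactor: 1.1,
		// Once more than 100 sparse buckets exist, the histogram first resets
		// if the last reset is older than an hour; otherwise it widens the
		// zero bucket and, if needed, doubles bucket widths.
		NativeHistogramMaxBucketNumber:  100,
		NativeHistogramMinResetDuration: time.Hour,
	})
	prometheus.MustRegister(h)

	go func() {
		for {
			h.Observe(rand.ExpFloat64() / 10) // synthetic latencies
			time.Sleep(10 * time.Millisecond)
		}
	}()

	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":2112", nil))
}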
+ if *his.ZeroThreshold == 0 && *his.ZeroCount == 0 && len(his.PositiveSpan) == 0 && len(his.NegativeSpan) == 0 { + his.PositiveSpan = []*dto.BucketSpan{{ + Offset: proto.Int32(0), + Length: proto.Uint32(0), + }} + } } addAndResetCounts(hotCounts, coldCounts) return nil @@ -854,20 +880,23 @@ func (h *histogram) limitBuckets(counts *histogramCounts, value float64, bucket h.doubleBucketWidth(hotCounts, coldCounts) } -// maybeReset resests the whole histogram if at least h.nativeHistogramMinResetDuration +// maybeReset resets the whole histogram if at least h.nativeHistogramMinResetDuration // has been passed. It returns true if the histogram has been reset. The caller // must have locked h.mtx. -func (h *histogram) maybeReset(hot, cold *histogramCounts, coldIdx uint64, value float64, bucket int) bool { +func (h *histogram) maybeReset( + hot, cold *histogramCounts, coldIdx uint64, value float64, bucket int, +) bool { // We are using the possibly mocked h.now() rather than // time.Since(h.lastResetTime) to enable testing. - if h.nativeHistogramMinResetDuration == 0 || h.now().Sub(h.lastResetTime) < h.nativeHistogramMinResetDuration { + if h.nativeHistogramMinResetDuration == 0 || + h.now().Sub(h.lastResetTime) < h.nativeHistogramMinResetDuration { return false } // Completely reset coldCounts. h.resetCounts(cold) // Repeat the latest observation to not lose it completely. cold.observe(value, bucket, true) - // Make coldCounts the new hot counts while ressetting countAndHotIdx. + // Make coldCounts the new hot counts while resetting countAndHotIdx. n := atomic.SwapUint64(&h.countAndHotIdx, (coldIdx<<63)+1) count := n & ((1 << 63) - 1) waitForCooldown(count, hot) @@ -1176,6 +1205,7 @@ type constHistogram struct { sum float64 buckets map[float64]uint64 labelPairs []*dto.LabelPair + createdTs *timestamppb.Timestamp } func (h *constHistogram) Desc() *Desc { @@ -1183,7 +1213,9 @@ func (h *constHistogram) Desc() *Desc { } func (h *constHistogram) Write(out *dto.Metric) error { - his := &dto.Histogram{} + his := &dto.Histogram{ + CreatedTimestamp: h.createdTs, + } buckets := make([]*dto.Bucket, 0, len(h.buckets)) @@ -1230,7 +1262,7 @@ func NewConstHistogram( if desc.err != nil { return nil, desc.err } - if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { + if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { return nil, err } return &constHistogram{ @@ -1324,7 +1356,7 @@ func makeBuckets(buckets *sync.Map) ([]*dto.BucketSpan, []int64) { // Multiple spans with only small gaps in between are probably // encoded more efficiently as one larger span with a few empty // buckets. Needs some research to find the sweet spot. For now, - // we assume that gaps of one ore two buckets should not create + // we assume that gaps of one or two buckets should not create // a new span. iDelta := int32(i - nextI) if n == 0 || iDelta > 2 { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go index fd0750f2cf502..a595a20362541 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go @@ -14,7 +14,7 @@ // It provides tools to compare sequences of strings and generate textual diffs. // // Maintaining `GetUnifiedDiffString` here because original repository -// (https://github.com/pmezard/go-difflib) is no loger maintained. 
+// (https://github.com/pmezard/go-difflib) is no longer maintained. package internal import ( diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go index 63ff8683ce52c..b3c4eca2bc1c8 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/labels.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/labels.go @@ -32,19 +32,15 @@ import ( // create a Desc. type Labels map[string]string +// LabelConstraint normalizes label values. +type LabelConstraint func(string) string + // ConstrainedLabels represents a label name and its constrain function // to normalize label values. This type is commonly used when constructing // metric vector Collectors. type ConstrainedLabel struct { Name string - Constraint func(string) string -} - -func (cl ConstrainedLabel) Constrain(v string) string { - if cl.Constraint == nil { - return v - } - return cl.Constraint(v) + Constraint LabelConstraint } // ConstrainableLabels is an interface that allows creating of labels that can @@ -58,7 +54,7 @@ func (cl ConstrainedLabel) Constrain(v string) string { // }, // }) type ConstrainableLabels interface { - constrainedLabels() ConstrainedLabels + compile() *compiledLabels labelNames() []string } @@ -67,8 +63,20 @@ type ConstrainableLabels interface { // metric vector Collectors. type ConstrainedLabels []ConstrainedLabel -func (cls ConstrainedLabels) constrainedLabels() ConstrainedLabels { - return cls +func (cls ConstrainedLabels) compile() *compiledLabels { + compiled := &compiledLabels{ + names: make([]string, len(cls)), + labelConstraints: map[string]LabelConstraint{}, + } + + for i, label := range cls { + compiled.names[i] = label.Name + if label.Constraint != nil { + compiled.labelConstraints[label.Name] = label.Constraint + } + } + + return compiled } func (cls ConstrainedLabels) labelNames() []string { @@ -92,18 +100,36 @@ func (cls ConstrainedLabels) labelNames() []string { // } type UnconstrainedLabels []string -func (uls UnconstrainedLabels) constrainedLabels() ConstrainedLabels { - constrainedLabels := make([]ConstrainedLabel, len(uls)) - for i, l := range uls { - constrainedLabels[i] = ConstrainedLabel{Name: l} +func (uls UnconstrainedLabels) compile() *compiledLabels { + return &compiledLabels{ + names: uls, } - return constrainedLabels } func (uls UnconstrainedLabels) labelNames() []string { return uls } +type compiledLabels struct { + names []string + labelConstraints map[string]LabelConstraint +} + +func (cls *compiledLabels) compile() *compiledLabels { + return cls +} + +func (cls *compiledLabels) labelNames() []string { + return cls.names +} + +func (cls *compiledLabels) constrain(labelName, value string) string { + if fn, ok := cls.labelConstraints[labelName]; ok && fn != nil { + return fn(value) + } + return value +} + // reservedLabelPrefix is a prefix which is not legal in user-supplied // label names. const reservedLabelPrefix = "__" diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go index 07bbc9d76871f..f018e57237d52 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -92,6 +92,9 @@ type Opts struct { // machine_role metric). 
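The compiledLabels rework above keeps the exported ConstrainedLabel / ConstrainedLabels shape and only precomputes the name slice plus a per-label constraint map. A sketch of how a caller supplies a LabelConstraint, assuming the V2 constructor variants that take the *VecOpts types (HistogramVecOpts appears earlier in this patch); the metric and label names are hypothetical:

package main

import (
	"strings"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// "method" is normalized before the vector is keyed, so "GET" and "get"
	// end up in the same child series; "path" has no constraint and passes through.
	durations := prometheus.V2.NewHistogramVec(prometheus.HistogramVecOpts{
		HistogramOpts: prometheus.HistogramOpts{
			Name:    "example_request_duration_seconds",
			Help:    "Hypothetical request latency.",
			Buckets: prometheus.DefBuckets,
		},
		VariableLabels: prometheus.ConstrainedLabels{
			{Name: "method", Constraint: strings.ToLower},
			{Name: "path"},
		},
	})
	durations.WithLabelValues("GET", "/healthz").Observe(0.003)
}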
See also // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels ConstLabels Labels + + // now is for testing purposes, by default it's time.Now. + now func() time.Time } // BuildFQName joins the given three name components by "_". Empty name diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promauto/auto.go b/vendor/github.com/prometheus/client_golang/prometheus/promauto/auto.go index fa90115921cdf..58f96599f1349 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promauto/auto.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promauto/auto.go @@ -17,7 +17,7 @@ // constructors register the Collectors with a registry before returning them. // There are two sets of constructors. The constructors in the first set are // top-level functions, while the constructors in the other set are methods of -// the Factory type. The top-level function return Collectors registered with +// the Factory type. The top-level functions return Collectors registered with // the global registry (prometheus.DefaultRegisterer), while the methods return // Collectors registered with the registry the Factory was constructed with. All // constructors panic if the registration fails. @@ -85,7 +85,7 @@ // } // // A Factory is created with the With(prometheus.Registerer) function, which -// enables two usage pattern. With(prometheus.Registerer) can be called once per +// enables two usage patterns. With(prometheus.Registerer) can be called once per // line: // // var ( @@ -153,7 +153,7 @@ // importing a package. // // A separate package allows conservative users to entirely ignore it. And -// whoever wants to use it, will do so explicitly, with an opportunity to read +// whoever wants to use it will do so explicitly, with an opportunity to read // this warning. // // Enjoy promauto responsibly! diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go index 3793036ad09be..356edb7868ccd 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go @@ -389,15 +389,12 @@ func isLabelCurried(c prometheus.Collector, label string) bool { return true } -// emptyLabels is a one-time allocation for non-partitioned metrics to avoid -// unnecessary allocations on each request. 
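The promauto doc wording corrected above distinguishes the global-registry constructors from the Factory returned by With. A short sketch of the Factory pattern, with a hypothetical registry and counter name:

package main

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

func main() {
	// Collectors built through the Factory register against this registry
	// rather than prometheus.DefaultRegisterer.
	reg := prometheus.NewRegistry()
	factory := promauto.With(reg)

	requests := factory.NewCounter(prometheus.CounterOpts{
		Name: "example_requests_total",
		Help: "Hypothetical request count.",
	})
	requests.Inc()
}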
-var emptyLabels = prometheus.Labels{} - func labels(code, method bool, reqMethod string, status int, extraMethods ...string) prometheus.Labels { + labels := prometheus.Labels{} + if !(code || method) { - return emptyLabels + return labels } - labels := prometheus.Labels{} if code { labels["code"] = sanitizeCode(status) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/push/push.go b/vendor/github.com/prometheus/client_golang/prometheus/push/push.go index d3a3cef2a1dfb..b2c32f4294414 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/push/push.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/push/push.go @@ -289,7 +289,7 @@ func (p *Pusher) push(ctx context.Context, method string) error { } if err := enc.Encode(mf); err != nil { return fmt.Errorf( - "failed to encode metric familty %s, error is %w", + "failed to encode metric family %s, error is %w", mf.GetName(), err) } } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go index 44da9433beef5..5e2ced25a02f7 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go @@ -548,7 +548,7 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) { goroutineBudget-- runtime.Gosched() } - // Once both checkedMetricChan and uncheckdMetricChan are closed + // Once both checkedMetricChan and uncheckedMetricChan are closed // and drained, the contraption above will nil out cmc and umc, // and then we can leave the collect loop here. if cmc == nil && umc == nil { @@ -963,9 +963,9 @@ func checkDescConsistency( // Is the desc consistent with the content of the metric? lpsFromDesc := make([]*dto.LabelPair, len(desc.constLabelPairs), len(dtoMetric.Label)) copy(lpsFromDesc, desc.constLabelPairs) - for _, l := range desc.variableLabels { + for _, l := range desc.variableLabels.names { lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{ - Name: proto.String(l.Name), + Name: proto.String(l), }) } if len(lpsFromDesc) != len(dtoMetric.Label) { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go index dd359264e5929..1462704446c80 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -26,6 +26,7 @@ import ( "github.com/beorn7/perks/quantile" "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/timestamppb" ) // quantileLabel is used for the label that defines the quantile in a @@ -145,6 +146,9 @@ type SummaryOpts struct { // is the internal buffer size of the underlying package // "github.com/bmizerany/perks/quantile"). BufCap uint32 + + // now is for testing purposes, by default it's time.Now. + now func() time.Time } // SummaryVecOpts bundles the options to create a SummaryVec metric. @@ -154,7 +158,7 @@ type SummaryVecOpts struct { SummaryOpts // VariableLabels are used to partition the metric vector by the given set - // of labels. Each label value will be constrained with the optional Contraint + // of labels. Each label value will be constrained with the optional Constraint // function, if provided. 
VariableLabels ConstrainableLabels } @@ -188,12 +192,12 @@ func NewSummary(opts SummaryOpts) Summary { } func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { - if len(desc.variableLabels) != len(labelValues) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), labelValues)) + if len(desc.variableLabels.names) != len(labelValues) { + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, labelValues)) } - for _, n := range desc.variableLabels { - if n.Name == quantileLabel { + for _, n := range desc.variableLabels.names { + if n == quantileLabel { panic(errQuantileLabelNotAllowed) } } @@ -222,6 +226,9 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { opts.BufCap = DefBufCap } + if opts.now == nil { + opts.now = time.Now + } if len(opts.Objectives) == 0 { // Use the lock-free implementation of a Summary without objectives. s := &noObjectivesSummary{ @@ -230,6 +237,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { counts: [2]*summaryCounts{{}, {}}, } s.init(s) // Init self-collection. + s.createdTs = timestamppb.New(opts.now()) return s } @@ -245,7 +253,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { coldBuf: make([]float64, 0, opts.BufCap), streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets), } - s.headStreamExpTime = time.Now().Add(s.streamDuration) + s.headStreamExpTime = opts.now().Add(s.streamDuration) s.hotBufExpTime = s.headStreamExpTime for i := uint32(0); i < opts.AgeBuckets; i++ { @@ -259,6 +267,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { sort.Float64s(s.sortedObjectives) s.init(s) // Init self-collection. + s.createdTs = timestamppb.New(opts.now()) return s } @@ -286,6 +295,8 @@ type summary struct { headStream *quantile.Stream headStreamIdx int headStreamExpTime, hotBufExpTime time.Time + + createdTs *timestamppb.Timestamp } func (s *summary) Desc() *Desc { @@ -307,7 +318,9 @@ func (s *summary) Observe(v float64) { } func (s *summary) Write(out *dto.Metric) error { - sum := &dto.Summary{} + sum := &dto.Summary{ + CreatedTimestamp: s.createdTs, + } qs := make([]*dto.Quantile, 0, len(s.objectives)) s.bufMtx.Lock() @@ -440,6 +453,8 @@ type noObjectivesSummary struct { counts [2]*summaryCounts labelPairs []*dto.LabelPair + + createdTs *timestamppb.Timestamp } func (s *noObjectivesSummary) Desc() *Desc { @@ -490,8 +505,9 @@ func (s *noObjectivesSummary) Write(out *dto.Metric) error { } sum := &dto.Summary{ - SampleCount: proto.Uint64(count), - SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), + SampleCount: proto.Uint64(count), + SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), + CreatedTimestamp: s.createdTs, } out.Summary = sum @@ -681,6 +697,7 @@ type constSummary struct { sum float64 quantiles map[float64]float64 labelPairs []*dto.LabelPair + createdTs *timestamppb.Timestamp } func (s *constSummary) Desc() *Desc { @@ -688,7 +705,9 @@ func (s *constSummary) Desc() *Desc { } func (s *constSummary) Write(out *dto.Metric) error { - sum := &dto.Summary{} + sum := &dto.Summary{ + CreatedTimestamp: s.createdTs, + } qs := make([]*dto.Quantile, 0, len(s.quantiles)) sum.SampleCount = proto.Uint64(s.count) @@ -737,7 +756,7 @@ func NewConstSummary( if desc.err != nil { return nil, desc.err } - if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { + if err 
:= validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { return nil, err } return &constSummary{ diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go index 5f6bb80014de6..cc23011fad23a 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/value.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go @@ -14,6 +14,7 @@ package prometheus import ( + "errors" "fmt" "sort" "time" @@ -91,7 +92,7 @@ func (v *valueFunc) Desc() *Desc { } func (v *valueFunc) Write(out *dto.Metric) error { - return populateMetric(v.valType, v.function(), v.labelPairs, nil, out) + return populateMetric(v.valType, v.function(), v.labelPairs, nil, out, nil) } // NewConstMetric returns a metric with one fixed value that cannot be @@ -105,12 +106,12 @@ func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues if desc.err != nil { return nil, desc.err } - if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { + if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { return nil, err } metric := &dto.Metric{} - if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric); err != nil { + if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric, nil); err != nil { return nil, err } @@ -130,6 +131,43 @@ func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelVal return m } +// NewConstMetricWithCreatedTimestamp does the same thing as NewConstMetric, but generates Counters +// with created timestamp set and returns an error for other metric types. +func NewConstMetricWithCreatedTimestamp(desc *Desc, valueType ValueType, value float64, ct time.Time, labelValues ...string) (Metric, error) { + if desc.err != nil { + return nil, desc.err + } + if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { + return nil, err + } + switch valueType { + case CounterValue: + break + default: + return nil, errors.New("created timestamps are only supported for counters") + } + + metric := &dto.Metric{} + if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric, timestamppb.New(ct)); err != nil { + return nil, err + } + + return &constMetric{ + desc: desc, + metric: metric, + }, nil +} + +// MustNewConstMetricWithCreatedTimestamp is a version of NewConstMetricWithCreatedTimestamp that panics where +// NewConstMetricWithCreatedTimestamp would have returned an error. +func MustNewConstMetricWithCreatedTimestamp(desc *Desc, valueType ValueType, value float64, ct time.Time, labelValues ...string) Metric { + m, err := NewConstMetricWithCreatedTimestamp(desc, valueType, value, ct, labelValues...) + if err != nil { + panic(err) + } + return m +} + type constMetric struct { desc *Desc metric *dto.Metric @@ -153,11 +191,12 @@ func populateMetric( labelPairs []*dto.LabelPair, e *dto.Exemplar, m *dto.Metric, + ct *timestamppb.Timestamp, ) error { m.Label = labelPairs switch t { case CounterValue: - m.Counter = &dto.Counter{Value: proto.Float64(v), Exemplar: e} + m.Counter = &dto.Counter{Value: proto.Float64(v), Exemplar: e, CreatedTimestamp: ct} case GaugeValue: m.Gauge = &dto.Gauge{Value: proto.Float64(v)} case UntypedValue: @@ -176,19 +215,19 @@ func populateMetric( // This function is only needed for custom Metric implementations. See MetricVec // example. 
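NewConstMetricWithCreatedTimestamp and its Must variant above accept only CounterValue. A sketch of how a custom collector might forward an externally tracked start time as the created timestamp; the collector type, metric name and values are hypothetical:

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

// exampleCollector mirrors a counter kept by some external system and forwards
// that system's start time as the created timestamp.
type exampleCollector struct {
	desc  *prometheus.Desc
	value float64
	start time.Time
}

func (c *exampleCollector) Describe(ch chan<- *prometheus.Desc) { ch <- c.desc }

func (c *exampleCollector) Collect(ch chan<- prometheus.Metric) {
	// Non-counter value types make NewConstMetricWithCreatedTimestamp return an
	// error, so the Must variant would panic for them.
	ch <- prometheus.MustNewConstMetricWithCreatedTimestamp(
		c.desc, prometheus.CounterValue, c.value, c.start,
	)
}

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(&exampleCollector{
		desc:  prometheus.NewDesc("example_events_total", "Hypothetical event count.", nil, nil),
		value: 42,
		start: time.Now().Add(-time.Hour),
	})
}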
func MakeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair { - totalLen := len(desc.variableLabels) + len(desc.constLabelPairs) + totalLen := len(desc.variableLabels.names) + len(desc.constLabelPairs) if totalLen == 0 { // Super fast path. return nil } - if len(desc.variableLabels) == 0 { + if len(desc.variableLabels.names) == 0 { // Moderately fast path. return desc.constLabelPairs } labelPairs := make([]*dto.LabelPair, 0, totalLen) - for i, l := range desc.variableLabels { + for i, l := range desc.variableLabels.names { labelPairs = append(labelPairs, &dto.LabelPair{ - Name: proto.String(l.Name), + Name: proto.String(l), Value: proto.String(labelValues[i]), }) } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go index f0d0015a0ff9e..955cfd59f8398 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go @@ -20,24 +20,6 @@ import ( "github.com/prometheus/common/model" ) -var labelsPool = &sync.Pool{ - New: func() interface{} { - return make(Labels) - }, -} - -func getLabelsFromPool() Labels { - return labelsPool.Get().(Labels) -} - -func putLabelsToPool(labels Labels) { - for k := range labels { - delete(labels, k) - } - - labelsPool.Put(labels) -} - // MetricVec is a Collector to bundle metrics of the same name that differ in // their label values. MetricVec is not used directly but as a building block // for implementations of vectors of a given metric type, like GaugeVec, @@ -91,6 +73,7 @@ func NewMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec { // See also the CounterVec example. func (m *MetricVec) DeleteLabelValues(lvs ...string) bool { lvs = constrainLabelValues(m.desc, lvs, m.curry) + h, err := m.hashLabelValues(lvs) if err != nil { return false @@ -110,8 +93,8 @@ func (m *MetricVec) DeleteLabelValues(lvs ...string) bool { // This method is used for the same purpose as DeleteLabelValues(...string). See // there for pros and cons of the two methods. func (m *MetricVec) Delete(labels Labels) bool { - labels = constrainLabels(m.desc, labels) - defer putLabelsToPool(labels) + labels, closer := constrainLabels(m.desc, labels) + defer closer() h, err := m.hashLabels(labels) if err != nil { @@ -128,8 +111,8 @@ func (m *MetricVec) Delete(labels Labels) bool { // Note that curried labels will never be matched if deleting from the curried vector. // To match curried labels with DeletePartialMatch, it must be called on the base vector. func (m *MetricVec) DeletePartialMatch(labels Labels) int { - labels = constrainLabels(m.desc, labels) - defer putLabelsToPool(labels) + labels, closer := constrainLabels(m.desc, labels) + defer closer() return m.metricMap.deleteByLabels(labels, m.curry) } @@ -169,11 +152,11 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) { oldCurry = m.curry iCurry int ) - for i, label := range m.desc.variableLabels { - val, ok := labels[label.Name] + for i, labelName := range m.desc.variableLabels.names { + val, ok := labels[labelName] if iCurry < len(oldCurry) && oldCurry[iCurry].index == i { if ok { - return nil, fmt.Errorf("label name %q is already curried", label.Name) + return nil, fmt.Errorf("label name %q is already curried", labelName) } newCurry = append(newCurry, oldCurry[iCurry]) iCurry++ @@ -181,7 +164,10 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) { if !ok { continue // Label stays uncurried. 
} - newCurry = append(newCurry, curriedLabelValue{i, label.Constrain(val)}) + newCurry = append(newCurry, curriedLabelValue{ + i, + m.desc.variableLabels.constrain(labelName, val), + }) } } if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 { @@ -250,8 +236,8 @@ func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) { // around MetricVec, implementing a vector for a specific Metric implementation, // for example GaugeVec. func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) { - labels = constrainLabels(m.desc, labels) - defer putLabelsToPool(labels) + labels, closer := constrainLabels(m.desc, labels) + defer closer() h, err := m.hashLabels(labels) if err != nil { @@ -262,7 +248,7 @@ func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) { } func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) { - if err := validateLabelValues(vals, len(m.desc.variableLabels)-len(m.curry)); err != nil { + if err := validateLabelValues(vals, len(m.desc.variableLabels.names)-len(m.curry)); err != nil { return 0, err } @@ -271,7 +257,7 @@ func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) { curry = m.curry iVals, iCurry int ) - for i := 0; i < len(m.desc.variableLabels); i++ { + for i := 0; i < len(m.desc.variableLabels.names); i++ { if iCurry < len(curry) && curry[iCurry].index == i { h = m.hashAdd(h, curry[iCurry].value) iCurry++ @@ -285,7 +271,7 @@ func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) { } func (m *MetricVec) hashLabels(labels Labels) (uint64, error) { - if err := validateValuesInLabels(labels, len(m.desc.variableLabels)-len(m.curry)); err != nil { + if err := validateValuesInLabels(labels, len(m.desc.variableLabels.names)-len(m.curry)); err != nil { return 0, err } @@ -294,17 +280,17 @@ func (m *MetricVec) hashLabels(labels Labels) (uint64, error) { curry = m.curry iCurry int ) - for i, label := range m.desc.variableLabels { - val, ok := labels[label.Name] + for i, labelName := range m.desc.variableLabels.names { + val, ok := labels[labelName] if iCurry < len(curry) && curry[iCurry].index == i { if ok { - return 0, fmt.Errorf("label name %q is already curried", label.Name) + return 0, fmt.Errorf("label name %q is already curried", labelName) } h = m.hashAdd(h, curry[iCurry].value) iCurry++ } else { if !ok { - return 0, fmt.Errorf("label name %q missing in label map", label.Name) + return 0, fmt.Errorf("label name %q missing in label map", labelName) } h = m.hashAdd(h, val) } @@ -482,7 +468,7 @@ func valueMatchesVariableOrCurriedValue(targetValue string, index int, values [] func matchPartialLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool { for l, v := range labels { // Check if the target label exists in our metrics and get the index. - varLabelIndex, validLabel := indexOf(l, desc.variableLabels.labelNames()) + varLabelIndex, validLabel := indexOf(l, desc.variableLabels.names) if validLabel { // Check the value of that label against the target value. // We don't consider curried values in partial matches. 
@@ -626,7 +612,7 @@ func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabe return false } iCurry := 0 - for i, k := range desc.variableLabels { + for i, k := range desc.variableLabels.names { if iCurry < len(curry) && curry[iCurry].index == i { if values[i] != curry[iCurry].value { return false @@ -634,7 +620,7 @@ func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabe iCurry++ continue } - if values[i] != labels[k.Name] { + if values[i] != labels[k] { return false } } @@ -644,13 +630,13 @@ func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabe func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []string { labelValues := make([]string, len(labels)+len(curry)) iCurry := 0 - for i, k := range desc.variableLabels { + for i, k := range desc.variableLabels.names { if iCurry < len(curry) && curry[iCurry].index == i { labelValues[i] = curry[iCurry].value iCurry++ continue } - labelValues[i] = labels[k.Name] + labelValues[i] = labels[k] } return labelValues } @@ -670,20 +656,37 @@ func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string { return labelValues } -func constrainLabels(desc *Desc, labels Labels) Labels { - constrainedLabels := getLabelsFromPool() - for l, v := range labels { - if i, ok := indexOf(l, desc.variableLabels.labelNames()); ok { - v = desc.variableLabels[i].Constrain(v) - } +var labelsPool = &sync.Pool{ + New: func() interface{} { + return make(Labels) + }, +} - constrainedLabels[l] = v +func constrainLabels(desc *Desc, labels Labels) (Labels, func()) { + if len(desc.variableLabels.labelConstraints) == 0 { + // Fast path when there's no constraints + return labels, func() {} } - return constrainedLabels + constrainedLabels := labelsPool.Get().(Labels) + for l, v := range labels { + constrainedLabels[l] = desc.variableLabels.constrain(l, v) + } + + return constrainedLabels, func() { + for k := range constrainedLabels { + delete(constrainedLabels, k) + } + labelsPool.Put(constrainedLabels) + } } func constrainLabelValues(desc *Desc, lvs []string, curry []curriedLabelValue) []string { + if len(desc.variableLabels.labelConstraints) == 0 { + // Fast path when there's no constraints + return lvs + } + constrainedValues := make([]string, len(lvs)) var iCurry, iLVs int for i := 0; i < len(lvs)+len(curry); i++ { @@ -692,8 +695,11 @@ func constrainLabelValues(desc *Desc, lvs []string, curry []curriedLabelValue) [ continue } - if i < len(desc.variableLabels) { - constrainedValues[iLVs] = desc.variableLabels[i].Constrain(lvs[iLVs]) + if i < len(desc.variableLabels.names) { + constrainedValues[iLVs] = desc.variableLabels.constrain( + desc.variableLabels.names[i], + lvs[iLVs], + ) } else { constrainedValues[iLVs] = lvs[iLVs] } diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go index 2b5bca4b999a6..84946b2703541 100644 --- a/vendor/github.com/prometheus/client_model/go/metrics.pb.go +++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go @@ -215,8 +215,9 @@ type Counter struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"` + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"` + 
CreatedTimestamp *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=created_timestamp,json=createdTimestamp" json:"created_timestamp,omitempty"` } func (x *Counter) Reset() { @@ -265,6 +266,13 @@ func (x *Counter) GetExemplar() *Exemplar { return nil } +func (x *Counter) GetCreatedTimestamp() *timestamppb.Timestamp { + if x != nil { + return x.CreatedTimestamp + } + return nil +} + type Quantile struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -325,9 +333,10 @@ type Summary struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` - SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` - Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"` + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` + SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` + Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"` + CreatedTimestamp *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=created_timestamp,json=createdTimestamp" json:"created_timestamp,omitempty"` } func (x *Summary) Reset() { @@ -383,6 +392,13 @@ func (x *Summary) GetQuantile() []*Quantile { return nil } +func (x *Summary) GetCreatedTimestamp() *timestamppb.Timestamp { + if x != nil { + return x.CreatedTimestamp + } + return nil +} + type Untyped struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -439,7 +455,8 @@ type Histogram struct { SampleCountFloat *float64 `protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat" json:"sample_count_float,omitempty"` // Overrides sample_count if > 0. SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` // Buckets for the conventional histogram. - Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` // Ordered in increasing order of upper_bound, +Inf bucket is optional. + Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` // Ordered in increasing order of upper_bound, +Inf bucket is optional. + CreatedTimestamp *timestamppb.Timestamp `protobuf:"bytes,15,opt,name=created_timestamp,json=createdTimestamp" json:"created_timestamp,omitempty"` // schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8. // They are all for base-2 bucket schemas, where 1 is a bucket boundary in each case, and // then each power of two is divided into 2^n logarithmic buckets. 
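With the created_timestamp fields and getters added above, the timestamp is available directly on gathered families. A short sketch, assuming the counter implementation in this vendored version populates the field the same way the histogram and summary hunks above do; the registry and metric name are hypothetical:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

func main() {
	reg := prometheus.NewRegistry()
	promauto.With(reg).NewCounter(prometheus.CounterOpts{
		Name: "example_events_total",
		Help: "Hypothetical event count.",
	}).Inc()

	families, err := reg.Gather()
	if err != nil {
		panic(err)
	}
	for _, mf := range families {
		for _, m := range mf.GetMetric() {
			// GetCreatedTimestamp returns nil when the field is unset.
			if ct := m.GetCounter().GetCreatedTimestamp(); ct != nil {
				fmt.Println(mf.GetName(), "created at", ct.AsTime())
			}
		}
	}
}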
@@ -525,6 +542,13 @@ func (x *Histogram) GetBucket() []*Bucket { return nil } +func (x *Histogram) GetCreatedTimestamp() *timestamppb.Timestamp { + if x != nil { + return x.CreatedTimestamp + } + return nil +} + func (x *Histogram) GetSchema() int32 { if x != nil && x.Schema != nil { return *x.Schema @@ -972,137 +996,151 @@ var file_io_prometheus_client_metrics_proto_rawDesc = []byte{ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x1d, 0x0a, 0x05, 0x47, 0x61, 0x75, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x22, 0x5b, 0x0a, 0x07, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, - 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65, 0x6d, - 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x22, 0x3c, - 0x0a, 0x08, 0x51, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x71, 0x75, - 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x08, 0x71, 0x75, - 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x87, 0x01, 0x0a, - 0x07, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70, + 0x65, 0x22, 0xa4, 0x01, 0x0a, 0x07, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, + 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65, + 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12, + 0x47, 0x0a, 0x11, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x3c, 0x0a, 0x08, 0x51, 0x75, 0x61, 0x6e, + 0x74, 0x69, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xd0, 0x01, 0x0a, 0x07, 0x53, 0x75, 0x6d, 0x6d, 0x61, + 0x72, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, + 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, + 0x73, 0x75, 0x6d, 0x18, 0x02, 
0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x73, 0x61, 0x6d, 0x70, 0x6c, + 0x65, 0x53, 0x75, 0x6d, 0x12, 0x3a, 0x0a, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, + 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x51, 0x75, + 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x52, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, + 0x12, 0x47, 0x0a, 0x11, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x1f, 0x0a, 0x07, 0x55, 0x6e, 0x74, + 0x79, 0x70, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xac, 0x05, 0x0a, 0x09, 0x48, + 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, - 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, - 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, - 0x09, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x3a, 0x0a, 0x08, 0x71, 0x75, - 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, + 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x73, + 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, + 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x10, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6d, + 0x70, 0x6c, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x73, + 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x34, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, + 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, + 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x47, + 0x0a, 0x11, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x11, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, + 0x25, 0x0a, 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, + 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0d, 0x7a, 0x65, 0x72, 0x6f, 0x54, 0x68, 0x72, + 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 
0x04, 0x52, 0x09, 0x7a, 0x65, 0x72, 0x6f, + 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x01, 0x52, + 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, + 0x45, 0x0a, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x73, 0x70, 0x61, 0x6e, + 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, + 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x0c, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, + 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, + 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x12, 0x52, 0x0d, + 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x12, 0x25, 0x0a, + 0x0e, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, + 0x0b, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x45, 0x0a, 0x0d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, + 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x69, 0x6f, + 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x0c, 0x70, + 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x70, + 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x18, 0x0d, 0x20, + 0x03, 0x28, 0x12, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c, + 0x74, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69, + 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xc6, 0x01, 0x0a, 0x06, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, + 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, + 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, + 0x34, 0x0a, 0x16, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, + 0x14, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, + 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x70, 0x70, 0x65, 0x72, 0x5f, 0x62, + 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x75, 0x70, 0x70, 0x65, + 0x72, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, + 0x61, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, + 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, + 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, + 0x61, 0x72, 0x22, 0x3c, 0x0a, 0x0a, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, + 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x01, 0x20, 
0x01, 0x28, 0x11, + 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, + 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, + 0x22, 0x91, 0x01, 0x0a, 0x08, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12, 0x35, 0x0a, + 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x2e, 0x51, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x52, 0x08, 0x71, 0x75, - 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x22, 0x1f, 0x0a, 0x07, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, - 0x64, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xe3, 0x04, 0x0a, 0x09, 0x48, 0x69, 0x73, 0x74, - 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, - 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x73, 0x61, 0x6d, - 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x73, 0x61, 0x6d, 0x70, - 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x01, 0x52, 0x10, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, - 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, - 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x73, 0x61, 0x6d, 0x70, - 0x6c, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x34, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, - 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, - 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x42, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x11, 0x52, 0x06, 0x73, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x74, 0x68, 0x72, 0x65, - 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0d, 0x7a, 0x65, 0x72, - 0x6f, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x7a, 0x65, - 0x72, 0x6f, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, - 0x7a, 0x65, 0x72, 0x6f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x7a, 0x65, 0x72, - 0x6f, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x01, 0x52, 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, - 0x6f, 0x61, 0x74, 0x12, 0x45, 0x0a, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, - 0x73, 0x70, 0x61, 0x6e, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x69, 0x6f, 0x2e, + 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52, 0x05, 0x6c, + 0x61, 0x62, 0x65, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, + 0x74, 
0x61, 0x6d, 0x70, 0x22, 0xff, 0x02, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, + 0x35, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, + 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52, + 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x31, 0x0a, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, + 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x61, 0x75, + 0x67, 0x65, 0x52, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, - 0x74, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x0c, 0x6e, 0x65, - 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x65, - 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x18, 0x0a, 0x20, 0x03, - 0x28, 0x12, 0x52, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c, 0x74, - 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, - 0x75, 0x6e, 0x74, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, - 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x45, 0x0a, 0x0d, 0x70, 0x6f, 0x73, 0x69, - 0x74, 0x69, 0x76, 0x65, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x20, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, - 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, - 0x6e, 0x52, 0x0c, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12, - 0x25, 0x0a, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, - 0x61, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x12, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, - 0x65, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, - 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, - 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xc6, 0x01, - 0x0a, 0x06, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x75, 0x6d, 0x75, - 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x04, 0x52, 0x0f, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, - 0x75, 0x6e, 0x74, 0x12, 0x34, 0x0a, 0x16, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, - 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x01, 0x52, 0x14, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, - 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x70, 0x70, - 0x65, 0x72, 0x5f, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, - 0x75, 0x70, 0x70, 0x65, 0x72, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, - 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, + 0x74, 0x2e, 0x43, 0x6f, 0x75, 
0x6e, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x65, 0x72, 0x12, 0x37, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, + 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x75, 0x6d, 0x6d, 0x61, + 0x72, 0x79, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x37, 0x0a, 0x07, 0x75, + 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, - 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x22, 0x3c, 0x0a, 0x0a, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x53, 0x70, 0x61, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x11, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, - 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, - 0x6e, 0x67, 0x74, 0x68, 0x22, 0x91, 0x01, 0x0a, 0x08, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, - 0x72, 0x12, 0x35, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, - 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, - 0x72, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x38, - 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0xff, 0x02, 0x0a, 0x06, 0x4d, 0x65, 0x74, - 0x72, 0x69, 0x63, 0x12, 0x35, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, - 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, - 0x61, 0x69, 0x72, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x31, 0x0a, 0x05, 0x67, 0x61, - 0x75, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x69, 0x6f, 0x2e, 0x70, - 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x2e, 0x47, 0x61, 0x75, 0x67, 0x65, 0x52, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x12, 0x37, 0x0a, - 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, - 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, - 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, - 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, - 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x53, - 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, - 0x37, 0x0a, 0x07, 0x75, 0x6e, 0x74, 0x79, 0x70, 0x65, 
0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, - 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x52, - 0x07, 0x75, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x12, 0x3d, 0x0a, 0x09, 0x68, 0x69, 0x73, 0x74, - 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, - 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, - 0x6e, 0x74, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x52, 0x09, 0x68, 0x69, - 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x4d, 0x73, 0x22, 0xa2, 0x01, 0x0a, 0x0c, 0x4d, - 0x65, 0x74, 0x72, 0x69, 0x63, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x12, 0x0a, 0x04, 0x68, 0x65, 0x6c, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, - 0x65, 0x6c, 0x70, 0x12, 0x34, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x20, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, - 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, - 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x06, 0x6d, 0x65, 0x74, - 0x72, 0x69, 0x63, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, - 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x2a, - 0x62, 0x0a, 0x0a, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, - 0x07, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41, - 0x55, 0x47, 0x45, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x4d, 0x4d, 0x41, 0x52, 0x59, - 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x54, 0x59, 0x50, 0x45, 0x44, 0x10, 0x03, 0x12, - 0x0d, 0x0a, 0x09, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x04, 0x12, 0x13, - 0x0a, 0x0f, 0x47, 0x41, 0x55, 0x47, 0x45, 0x5f, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, - 0x4d, 0x10, 0x05, 0x42, 0x52, 0x0a, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, - 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5a, 0x3a, 0x67, 0x69, 0x74, - 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, - 0x75, 0x73, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2f, - 0x67, 0x6f, 0x3b, 0x69, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, - 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x2e, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x52, 0x07, 0x75, 0x6e, 0x74, + 0x79, 0x70, 0x65, 0x64, 0x12, 0x3d, 0x0a, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, + 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, + 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x48, + 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x52, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, + 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 
0x0c, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x5f, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x4d, 0x73, 0x22, 0xa2, 0x01, 0x0a, 0x0c, 0x4d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x68, + 0x65, 0x6c, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x65, 0x6c, 0x70, 0x12, + 0x34, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, + 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x52, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, + 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, + 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x2a, 0x62, 0x0a, 0x0a, 0x4d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x4f, 0x55, + 0x4e, 0x54, 0x45, 0x52, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41, 0x55, 0x47, 0x45, 0x10, + 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x4d, 0x4d, 0x41, 0x52, 0x59, 0x10, 0x02, 0x12, 0x0b, + 0x0a, 0x07, 0x55, 0x4e, 0x54, 0x59, 0x50, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0d, 0x0a, 0x09, 0x48, + 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x04, 0x12, 0x13, 0x0a, 0x0f, 0x47, 0x41, + 0x55, 0x47, 0x45, 0x5f, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x05, 0x42, + 0x52, 0x0a, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, + 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2f, 0x63, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2f, 0x67, 0x6f, 0x3b, 0x69, + 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x5f, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, } var ( @@ -1137,26 +1175,29 @@ var file_io_prometheus_client_metrics_proto_goTypes = []interface{}{ } var file_io_prometheus_client_metrics_proto_depIdxs = []int32{ 10, // 0: io.prometheus.client.Counter.exemplar:type_name -> io.prometheus.client.Exemplar - 4, // 1: io.prometheus.client.Summary.quantile:type_name -> io.prometheus.client.Quantile - 8, // 2: io.prometheus.client.Histogram.bucket:type_name -> io.prometheus.client.Bucket - 9, // 3: io.prometheus.client.Histogram.negative_span:type_name -> io.prometheus.client.BucketSpan - 9, // 4: io.prometheus.client.Histogram.positive_span:type_name -> io.prometheus.client.BucketSpan - 10, // 5: io.prometheus.client.Bucket.exemplar:type_name -> io.prometheus.client.Exemplar - 1, // 6: io.prometheus.client.Exemplar.label:type_name -> io.prometheus.client.LabelPair - 13, // 7: io.prometheus.client.Exemplar.timestamp:type_name -> google.protobuf.Timestamp - 1, // 8: io.prometheus.client.Metric.label:type_name -> io.prometheus.client.LabelPair - 2, // 9: io.prometheus.client.Metric.gauge:type_name -> io.prometheus.client.Gauge - 3, // 10: io.prometheus.client.Metric.counter:type_name -> io.prometheus.client.Counter - 5, // 11: 
io.prometheus.client.Metric.summary:type_name -> io.prometheus.client.Summary - 6, // 12: io.prometheus.client.Metric.untyped:type_name -> io.prometheus.client.Untyped - 7, // 13: io.prometheus.client.Metric.histogram:type_name -> io.prometheus.client.Histogram - 0, // 14: io.prometheus.client.MetricFamily.type:type_name -> io.prometheus.client.MetricType - 11, // 15: io.prometheus.client.MetricFamily.metric:type_name -> io.prometheus.client.Metric - 16, // [16:16] is the sub-list for method output_type - 16, // [16:16] is the sub-list for method input_type - 16, // [16:16] is the sub-list for extension type_name - 16, // [16:16] is the sub-list for extension extendee - 0, // [0:16] is the sub-list for field type_name + 13, // 1: io.prometheus.client.Counter.created_timestamp:type_name -> google.protobuf.Timestamp + 4, // 2: io.prometheus.client.Summary.quantile:type_name -> io.prometheus.client.Quantile + 13, // 3: io.prometheus.client.Summary.created_timestamp:type_name -> google.protobuf.Timestamp + 8, // 4: io.prometheus.client.Histogram.bucket:type_name -> io.prometheus.client.Bucket + 13, // 5: io.prometheus.client.Histogram.created_timestamp:type_name -> google.protobuf.Timestamp + 9, // 6: io.prometheus.client.Histogram.negative_span:type_name -> io.prometheus.client.BucketSpan + 9, // 7: io.prometheus.client.Histogram.positive_span:type_name -> io.prometheus.client.BucketSpan + 10, // 8: io.prometheus.client.Bucket.exemplar:type_name -> io.prometheus.client.Exemplar + 1, // 9: io.prometheus.client.Exemplar.label:type_name -> io.prometheus.client.LabelPair + 13, // 10: io.prometheus.client.Exemplar.timestamp:type_name -> google.protobuf.Timestamp + 1, // 11: io.prometheus.client.Metric.label:type_name -> io.prometheus.client.LabelPair + 2, // 12: io.prometheus.client.Metric.gauge:type_name -> io.prometheus.client.Gauge + 3, // 13: io.prometheus.client.Metric.counter:type_name -> io.prometheus.client.Counter + 5, // 14: io.prometheus.client.Metric.summary:type_name -> io.prometheus.client.Summary + 6, // 15: io.prometheus.client.Metric.untyped:type_name -> io.prometheus.client.Untyped + 7, // 16: io.prometheus.client.Metric.histogram:type_name -> io.prometheus.client.Histogram + 0, // 17: io.prometheus.client.MetricFamily.type:type_name -> io.prometheus.client.MetricType + 11, // 18: io.prometheus.client.MetricFamily.metric:type_name -> io.prometheus.client.Metric + 19, // [19:19] is the sub-list for method output_type + 19, // [19:19] is the sub-list for method input_type + 19, // [19:19] is the sub-list for extension type_name + 19, // [19:19] is the sub-list for extension extendee + 0, // [0:19] is the sub-list for field type_name } func init() { file_io_prometheus_client_metrics_proto_init() } diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common index b111d25620007..0ce7ea4612e11 100644 --- a/vendor/github.com/prometheus/procfs/Makefile.common +++ b/vendor/github.com/prometheus/procfs/Makefile.common @@ -49,19 +49,19 @@ endif GOTEST := $(GO) test GOTEST_DIR := ifneq ($(CIRCLE_JOB),) -ifneq ($(shell which gotestsum),) +ifneq ($(shell command -v gotestsum > /dev/null),) GOTEST_DIR := test-results GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml -- endif endif -PROMU_VERSION ?= 0.14.0 +PROMU_VERSION ?= 0.15.0 PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz SKIP_GOLANGCI_LINT := GOLANGCI_LINT := GOLANGCI_LINT_OPTS ?= 
-GOLANGCI_LINT_VERSION ?= v1.51.2 +GOLANGCI_LINT_VERSION ?= v1.53.3 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64. # windows isn't included here because of the path separator being different. ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) @@ -178,7 +178,7 @@ endif .PHONY: common-yamllint common-yamllint: @echo ">> running yamllint on all YAML files in the repository" -ifeq (, $(shell which yamllint)) +ifeq (, $(shell command -v yamllint > /dev/null)) @echo "yamllint not installed so skipping" else yamllint . diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go index 60c551e026bf8..4980c875bfce1 100644 --- a/vendor/github.com/prometheus/procfs/fs.go +++ b/vendor/github.com/prometheus/procfs/fs.go @@ -20,8 +20,8 @@ import ( // FS represents the pseudo-filesystem sys, which provides an interface to // kernel data structures. type FS struct { - proc fs.FS - real bool + proc fs.FS + isReal bool } // DefaultMountPoint is the common mount point of the proc filesystem. @@ -41,10 +41,10 @@ func NewFS(mountPoint string) (FS, error) { return FS{}, err } - real, err := isRealProc(mountPoint) + isReal, err := isRealProc(mountPoint) if err != nil { return FS{}, err } - return FS{fs, real}, nil + return FS{fs, isReal}, nil } diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_notype.go b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go index 8005769689664..13d74e3957154 100644 --- a/vendor/github.com/prometheus/procfs/fs_statfs_notype.go +++ b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go @@ -11,8 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build netbsd || openbsd || solaris || windows -// +build netbsd openbsd solaris windows +//go:build netbsd || openbsd || solaris || windows || nostatfs +// +build netbsd openbsd solaris windows nostatfs package procfs diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_type.go b/vendor/github.com/prometheus/procfs/fs_statfs_type.go index 6233217ad292e..bee151445a053 100644 --- a/vendor/github.com/prometheus/procfs/fs_statfs_type.go +++ b/vendor/github.com/prometheus/procfs/fs_statfs_type.go @@ -11,8 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build !netbsd && !openbsd && !solaris && !windows -// +build !netbsd,!openbsd,!solaris,!windows +//go:build !netbsd && !openbsd && !solaris && !windows && !nostatfs +// +build !netbsd,!openbsd,!solaris,!windows,!nostatfs package procfs diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go index e1599961ff629..d1f71caa5d7aa 100644 --- a/vendor/github.com/prometheus/procfs/proc.go +++ b/vendor/github.com/prometheus/procfs/proc.go @@ -244,7 +244,7 @@ func (p Proc) FileDescriptorTargets() ([]string, error) { // a process. 
func (p Proc) FileDescriptorsLen() (int, error) { // Use fast path if available (Linux v6.2): https://github.com/torvalds/linux/commit/f1f1f2569901 - if p.fs.real { + if p.fs.isReal { stat, err := os.Stat(p.path("fd")) if err != nil { return 0, err diff --git a/vendor/github.com/prometheus/procfs/proc_psi.go b/vendor/github.com/prometheus/procfs/proc_psi.go index 152539d351719..fe9dbb425f578 100644 --- a/vendor/github.com/prometheus/procfs/proc_psi.go +++ b/vendor/github.com/prometheus/procfs/proc_psi.go @@ -64,11 +64,11 @@ func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) { return PSIStats{}, fmt.Errorf("%s: psi_stats: unavailable for %q: %w", ErrFileRead, resource, err) } - return parsePSIStats(resource, bytes.NewReader(data)) + return parsePSIStats(bytes.NewReader(data)) } // parsePSIStats parses the specified file for pressure stall information. -func parsePSIStats(resource string, r io.Reader) (PSIStats, error) { +func parsePSIStats(r io.Reader) (PSIStats, error) { psiStats := PSIStats{} scanner := bufio.NewScanner(r) diff --git a/vendor/github.com/prometheus/procfs/proc_smaps.go b/vendor/github.com/prometheus/procfs/proc_smaps.go index 0e97d99575e7e..ad8785a407aa0 100644 --- a/vendor/github.com/prometheus/procfs/proc_smaps.go +++ b/vendor/github.com/prometheus/procfs/proc_smaps.go @@ -135,12 +135,12 @@ func (s *ProcSMapsRollup) parseLine(line string) error { } vBytes := vKBytes * 1024 - s.addValue(k, v, vKBytes, vBytes) + s.addValue(k, vBytes) return nil } -func (s *ProcSMapsRollup) addValue(k string, vString string, vUint uint64, vUintBytes uint64) { +func (s *ProcSMapsRollup) addValue(k string, vUintBytes uint64) { switch k { case "Rss": s.Rss += vUintBytes diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go index 05269c896d93d..34fc3ee21b69a 100644 --- a/vendor/github.com/prometheus/procfs/stat.go +++ b/vendor/github.com/prometheus/procfs/stat.go @@ -187,6 +187,10 @@ func parseStat(r io.Reader, fileName string) (Stat, error) { err error ) + // Increase default scanner buffer to handle very long `intr` lines. + buf := make([]byte, 0, 8*1024) + scanner.Buffer(buf, 1024*1024) + for scanner.Scan() { line := scanner.Text() parts := strings.Fields(scanner.Text()) diff --git a/vendor/github.com/prometheus/procfs/thread.go b/vendor/github.com/prometheus/procfs/thread.go index 394762c73ebd2..df2215ece0084 100644 --- a/vendor/github.com/prometheus/procfs/thread.go +++ b/vendor/github.com/prometheus/procfs/thread.go @@ -55,7 +55,7 @@ func (fs FS) AllThreads(pid int) (Procs, error) { continue } - t = append(t, Proc{PID: int(tid), fs: FS{fsi.FS(taskPath), fs.real}}) + t = append(t, Proc{PID: int(tid), fs: FS{fsi.FS(taskPath), fs.isReal}}) } return t, nil @@ -67,12 +67,12 @@ func (fs FS) Thread(pid, tid int) (Proc, error) { if _, err := os.Stat(taskPath); err != nil { return Proc{}, err } - return Proc{PID: tid, fs: FS{fsi.FS(taskPath), fs.real}}, nil + return Proc{PID: tid, fs: FS{fsi.FS(taskPath), fs.isReal}}, nil } // Thread returns a process for a given TID of Proc. 
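A side note on the stat.go hunk above, not part of the patch: bufio.Scanner caps tokens at 64 KiB by default and fails with bufio.ErrTooLong on longer lines, which is why the buffer is grown before scanning /proc/stat. A minimal standalone sketch of the same pattern:

```go
package main

import (
	"bufio"
	"fmt"
	"strings"
)

func main() {
	// A 200 KiB line would overflow bufio.Scanner's default 64 KiB limit,
	// so grow the buffer before the first Scan call, exactly as the
	// parseStat hunk above does for very long `intr` lines.
	input := strings.Repeat("x", 200*1024) + "\n"
	scanner := bufio.NewScanner(strings.NewReader(input))

	buf := make([]byte, 0, 8*1024)
	scanner.Buffer(buf, 1024*1024)

	for scanner.Scan() {
		fmt.Println("line length:", len(scanner.Text()))
	}
	if err := scanner.Err(); err != nil {
		fmt.Println("scan error:", err)
	}
}
```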
func (proc Proc) Thread(tid int) (Proc, error) { - tfs := FS{fsi.FS(proc.path("task")), proc.fs.real} + tfs := FS{fsi.FS(proc.path("task")), proc.fs.isReal} if _, err := os.Stat(tfs.proc.Path(strconv.Itoa(tid))); err != nil { return Proc{}, err } diff --git a/vendor/github.com/thanos-io/objstore/.go-version b/vendor/github.com/thanos-io/objstore/.go-version index adc97d8e22132..d8c94e3e98c4c 100644 --- a/vendor/github.com/thanos-io/objstore/.go-version +++ b/vendor/github.com/thanos-io/objstore/.go-version @@ -1 +1 @@ -1.18 +1.20.x diff --git a/vendor/github.com/thanos-io/objstore/CHANGELOG.md b/vendor/github.com/thanos-io/objstore/CHANGELOG.md index 32d0f74268b4c..5889578b2ee7e 100644 --- a/vendor/github.com/thanos-io/objstore/CHANGELOG.md +++ b/vendor/github.com/thanos-io/objstore/CHANGELOG.md @@ -11,9 +11,13 @@ We use *breaking :warning:* to mark changes that are not backward compatible (re ## Unreleased ### Fixed +- [#102](https://github.com/thanos-io/objstore/pull/102) Azure: bump azblob sdk to get concurrency fixes. - [#33](https://github.com/thanos-io/objstore/pull/33) Tracing: Add `ContextWithTracer()` to inject the tracer into the context. - [#34](https://github.com/thanos-io/objstore/pull/34) Fix ignored options when creating shared credential Azure client. - [#62](https://github.com/thanos-io/objstore/pull/62) S3: Fix ignored context cancellation in `Iter` method. +- [#77](https://github.com/thanos-io/objstore/pull/77) Fix buckets wrapped with metrics from being unable to determine object sizes in `Upload`. +- [#78](https://github.com/thanos-io/objstore/pull/78) S3: Fix possible concurrent modification of the PutUserMetadata map. +- [#79](https://github.com/thanos-io/objstore/pull/79) Metrics: Fix `objstore_bucket_operation_duration_seconds` for `iter` operations. ### Added - [#15](https://github.com/thanos-io/objstore/pull/15) Add Oracle Cloud Infrastructure Object Storage Bucket support. @@ -27,9 +31,18 @@ We use *breaking :warning:* to mark changes that are not backward compatible (re - [#61](https://github.com/thanos-io/objstore/pull/61) Add OpenTelemetry TracingBucket. > This also changes the behaviour of `client.NewBucket`. Now it returns, uninstrumented and untraced bucket. You can combine `objstore.WrapWithMetrics` and `tracing/{opentelemetry,opentracing}.WrapWithTraces` to have old behavior. +- [#69](https://github.com/thanos-io/objstore/pull/69) [#66](https://github.com/thanos-io/objstore/pull/66) Add `objstore_bucket_operation_transferred_bytes` that counts the number of total bytes read from the bucket operation Get/GetRange and also counts the number of total bytes written to the bucket operation Upload. - [#64](https://github.com/thanos-io/objstore/pull/64) OCI: OKE Workload Identity support. - [#73](https://github.com/thanos-io/objstore/pull/73) Аdded file path to erros from DownloadFile - [#51](https://github.com/thanos-io/objstore/pull/51) Azure: Support using connection string authentication. +- [#76](https://github.com/thanos-io/objstore/pull/76) GCS: Query for object names only in `Iter` to possibly improve performance when listing objects. +- [#85](https://github.com/thanos-io/objstore/pull/85) S3: Allow checksum algorithm to be configured +- [#92](https://github.com/thanos-io/objstore/pull/92) GCS: Allow using a gRPC client. 
+- [#94](https://github.com/thanos-io/objstore/pull/94) Allow timingReadCloser to be seeker +- [#96](https://github.com/thanos-io/objstore/pull/96) Allow nopCloserWithObjectSize to be seeker +- [#86](https://github.com/thanos-io/objstore/pull/86) GCS: Add HTTP Config to GCS +- [#99](https://github.com/thanos-io/objstore/pull/99) Swift: Add HTTP_Config +- [#108](https://github.com/thanos-io/objstore/pull/108) Metrics: Add native histogram definitions to histograms ### Changed - [#38](https://github.com/thanos-io/objstore/pull/38) *: Upgrade minio-go version to `v7.0.45`. @@ -38,4 +51,5 @@ We use *breaking :warning:* to mark changes that are not backward compatible (re - [#65](https://github.com/thanos-io/objstore/pull/65) *: Upgrade minio-go version to `v7.0.61`. - [#70](https://github.com/thanos-io/objstore/pull/70) GCS: Update cloud.google.com/go/storage version to `v1.27.0`. - [#71](https://github.com/thanos-io/objstore/pull/71) Replace method `IsCustomerManagedKeyError` for a more generic `IsAccessDeniedErr` on the bucket interface. +- [#89](https://github.com/thanos-io/objstore/pull/89) GCS: Upgrade cloud.google.com/go/storage version to `v1.35.1`. ### Removed diff --git a/vendor/github.com/thanos-io/objstore/Makefile b/vendor/github.com/thanos-io/objstore/Makefile index 09fd0d3ea8d5b..ed5aaf4b16992 100644 --- a/vendor/github.com/thanos-io/objstore/Makefile +++ b/vendor/github.com/thanos-io/objstore/Makefile @@ -39,7 +39,7 @@ check-docs: $(MDOX) .PHONY: deps deps: ## Ensures fresh go.mod and go.sum. - @go mod tidy -compat=1.18 + @go mod tidy -compat=1.20 @go mod verify .PHONY: format diff --git a/vendor/github.com/thanos-io/objstore/README.md b/vendor/github.com/thanos-io/objstore/README.md index 2a1552714cd3c..6ebdffc1795c7 100644 --- a/vendor/github.com/thanos-io/objstore/README.md +++ b/vendor/github.com/thanos-io/objstore/README.md @@ -67,7 +67,7 @@ type Bucket interface { All [provider implementations](providers) have to implement `Bucket` interface that allows common read and write operations that all supported by all object providers. If you want to limit the code that will do bucket operation to only read access (smart idea, allowing to limit access permissions), you can use the [`BucketReader` interface](objstore.go): -```go mdox-exec="sed -n '68,88p' objstore.go" +```go mdox-exec="sed -n '68,93p' objstore.go" // BucketReader provides read access to an object storage bucket. type BucketReader interface { @@ -90,6 +90,10 @@ type BucketReader interface { // IsAccessDeniedErr returns true if access to object is denied. IsAccessDeniedErr(err error) bool + + // Attributes returns information about the specified object. + Attributes(ctx context.Context, name string) (ObjectAttributes, error) +} ``` Those interfaces represent the object storage operations your code can use from `objstore` clients. 
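For orientation only, not part of the patch: a minimal sketch of consuming the read-only interface shown above. The helper name and object key are illustrative placeholders.

```go
package example

import (
	"context"
	"fmt"
	"io"

	"github.com/thanos-io/objstore"
)

// printObject uses only BucketReader operations, so it works against any
// provider implementation and never needs write permissions.
func printObject(ctx context.Context, bkt objstore.BucketReader, name string) error {
	attrs, err := bkt.Attributes(ctx, name) // added to BucketReader above
	if err != nil {
		return err
	}

	rc, err := bkt.Get(ctx, name)
	if err != nil {
		return err
	}
	defer rc.Close()

	data, err := io.ReadAll(rc)
	if err != nil {
		return err
	}
	fmt.Printf("%s: read %d bytes, attributes report %d\n", name, len(data), attrs.Size)
	return nil
}
```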
@@ -176,6 +180,7 @@ config: enable: false list_objects_version: "" bucket_lookup_type: auto + send_content_md5: true part_size: 67108864 sse_config: type: "" @@ -347,6 +352,24 @@ type: GCS config: bucket: "" service_account: "" + use_grpc: false + grpc_conn_pool_size: 0 + http_config: + idle_conn_timeout: 0s + response_header_timeout: 0s + insecure_skip_verify: false + tls_handshake_timeout: 0s + expect_continue_timeout: 0s + max_idle_conns: 0 + max_idle_conns_per_host: 0 + max_conns_per_host: 0 + tls_config: + ca_file: "" + cert_file: "" + key_file: "" + server_name: "" + insecure_skip_verify: false + disable_compression: false prefix: "" ``` @@ -494,6 +517,22 @@ config: connect_timeout: 10s timeout: 5m use_dynamic_large_objects: false + http_config: + idle_conn_timeout: 1m30s + response_header_timeout: 2m + insecure_skip_verify: false + tls_handshake_timeout: 10s + expect_continue_timeout: 1s + max_idle_conns: 100 + max_idle_conns_per_host: 100 + max_conns_per_host: 0 + tls_config: + ca_file: "" + cert_file: "" + key_file: "" + server_name: "" + insecure_skip_verify: false + disable_compression: false prefix: "" ``` diff --git a/vendor/github.com/thanos-io/objstore/exthttp/transport.go b/vendor/github.com/thanos-io/objstore/exthttp/transport.go index d9a807fdc131a..92f86a117b532 100644 --- a/vendor/github.com/thanos-io/objstore/exthttp/transport.go +++ b/vendor/github.com/thanos-io/objstore/exthttp/transport.go @@ -11,6 +11,16 @@ import ( "github.com/prometheus/common/model" ) +var DefaultHTTPConfig = HTTPConfig{ + IdleConnTimeout: model.Duration(90 * time.Second), + ResponseHeaderTimeout: model.Duration(2 * time.Minute), + TLSHandshakeTimeout: model.Duration(10 * time.Second), + ExpectContinueTimeout: model.Duration(1 * time.Second), + MaxIdleConns: 100, + MaxIdleConnsPerHost: 100, + MaxConnsPerHost: 0, +} + // HTTPConfig stores the http.Transport configuration for the cos and s3 minio client. type HTTPConfig struct { IdleConnTimeout model.Duration `yaml:"idle_conn_timeout"` diff --git a/vendor/github.com/thanos-io/objstore/objstore.go b/vendor/github.com/thanos-io/objstore/objstore.go index 84eafce3df4a8..c913068711b46 100644 --- a/vendor/github.com/thanos-io/objstore/objstore.go +++ b/vendor/github.com/thanos-io/objstore/objstore.go @@ -85,7 +85,7 @@ type BucketReader interface { // IsObjNotFoundErr returns true if error means that object is not found. Relevant to Get operations. IsObjNotFoundErr(err error) bool - // IsAccessDeniedErr returns true if acces to object is denied. + // IsAccessDeniedErr returns true if access to object is denied. IsAccessDeniedErr(err error) bool // Attributes returns information about the specified object. @@ -429,13 +429,21 @@ func WrapWithMetrics(b Bucket, reg prometheus.Registerer, name string) *metricBu Help: "Number of bytes transferred from/to bucket per operation.", ConstLabels: prometheus.Labels{"bucket": name}, Buckets: prometheus.ExponentialBuckets(2<<14, 2, 16), // 32KiB, 64KiB, ... 1GiB + // Use factor=2 for native histograms, which gives similar buckets as the original exponential buckets. 
+ NativeHistogramBucketFactor: 2, + NativeHistogramMaxBucketNumber: 100, + NativeHistogramMinResetDuration: 1 * time.Hour, }, []string{"operation"}), opsDuration: promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{ Name: "objstore_bucket_operation_duration_seconds", - Help: "Duration of successful operations against the bucket", + Help: "Duration of successful operations against the bucket per operation - iter operations include time spent on each callback.", ConstLabels: prometheus.Labels{"bucket": name}, Buckets: []float64{0.001, 0.01, 0.1, 0.3, 0.6, 1, 3, 6, 9, 20, 30, 60, 90, 120}, + // Use the recommended defaults for native histograms with 10% growth factor. + NativeHistogramBucketFactor: 1.1, + NativeHistogramMaxBucketNumber: 100, + NativeHistogramMinResetDuration: 1 * time.Hour, }, []string{"operation"}), lastSuccessfulUploadTime: promauto.With(reg).NewGauge(prometheus.GaugeOpts{ @@ -458,11 +466,12 @@ func WrapWithMetrics(b Bucket, reg prometheus.Registerer, name string) *metricBu bkt.opsDuration.WithLabelValues(op) bkt.opsFetchedBytes.WithLabelValues(op) } - // fetched bytes only relevant for get and getrange + + // fetched bytes only relevant for get, getrange and upload for _, op := range []string{ OpGet, OpGetRange, - // TODO: Add uploads + OpUpload, } { bkt.opsTransferredBytes.WithLabelValues(op) } @@ -503,12 +512,14 @@ func (b *metricBucket) Iter(ctx context.Context, dir string, f func(name string) const op = OpIter b.ops.WithLabelValues(op).Inc() + start := time.Now() err := b.bkt.Iter(ctx, dir, f, options...) if err != nil { if !b.isOpFailureExpected(err) && ctx.Err() != context.Canceled { b.opsFailures.WithLabelValues(op).Inc() } } + b.opsDuration.WithLabelValues(op).Observe(time.Since(start).Seconds()) return err } @@ -539,8 +550,9 @@ func (b *metricBucket) Get(ctx context.Context, name string) (io.ReadCloser, err } return nil, err } - return newTimingReadCloser( + return newTimingReader( rc, + true, op, b.opsDuration, b.opsFailures, @@ -561,8 +573,9 @@ func (b *metricBucket) GetRange(ctx context.Context, name string, off, length in } return nil, err } - return newTimingReadCloser( + return newTimingReader( rc, + true, op, b.opsDuration, b.opsFailures, @@ -592,15 +605,26 @@ func (b *metricBucket) Upload(ctx context.Context, name string, r io.Reader) err const op = OpUpload b.ops.WithLabelValues(op).Inc() - start := time.Now() - if err := b.bkt.Upload(ctx, name, r); err != nil { + trc := newTimingReader( + r, + false, + op, + b.opsDuration, + b.opsFailures, + b.isOpFailureExpected, + nil, + b.opsTransferredBytes, + ) + defer trc.Close() + err := b.bkt.Upload(ctx, name, trc) + if err != nil { if !b.isOpFailureExpected(err) && ctx.Err() != context.Canceled { b.opsFailures.WithLabelValues(op).Inc() } return err } b.lastSuccessfulUploadTime.SetToCurrentTime() - b.opsDuration.WithLabelValues(op).Observe(time.Since(start).Seconds()) + return nil } @@ -636,8 +660,13 @@ func (b *metricBucket) Name() string { return b.bkt.Name() } -type timingReadCloser struct { - io.ReadCloser +type timingReader struct { + io.Reader + + // closeReader holds whether the wrapper io.Reader should be closed when + // Close() is called on the timingReader. 
+ closeReader bool + objSize int64 objSizeErr error @@ -653,13 +682,15 @@ type timingReadCloser struct { transferredBytes *prometheus.HistogramVec } -func newTimingReadCloser(rc io.ReadCloser, op string, dur *prometheus.HistogramVec, failed *prometheus.CounterVec, isFailureExpected IsOpFailureExpectedFunc, fetchedBytes *prometheus.CounterVec, transferredBytes *prometheus.HistogramVec) *timingReadCloser { +func newTimingReader(r io.Reader, closeReader bool, op string, dur *prometheus.HistogramVec, failed *prometheus.CounterVec, isFailureExpected IsOpFailureExpectedFunc, fetchedBytes *prometheus.CounterVec, transferredBytes *prometheus.HistogramVec) io.ReadCloser { // Initialize the metrics with 0. dur.WithLabelValues(op) failed.WithLabelValues(op) - objSize, objSizeErr := TryToGetSize(rc) - return &timingReadCloser{ - ReadCloser: rc, + objSize, objSizeErr := TryToGetSize(r) + + trc := timingReader{ + Reader: r, + closeReader: closeReader, objSize: objSize, objSizeErr: objSizeErr, start: time.Now(), @@ -671,35 +702,80 @@ func newTimingReadCloser(rc io.ReadCloser, op string, dur *prometheus.HistogramV transferredBytes: transferredBytes, readBytes: 0, } + + _, isSeeker := r.(io.Seeker) + _, isReaderAt := r.(io.ReaderAt) + + if isSeeker && isReaderAt { + // The assumption is that in most cases when io.ReaderAt() is implemented then + // io.Seeker is implemented too (e.g. os.File). + return &timingReaderSeekerReaderAt{timingReaderSeeker: timingReaderSeeker{timingReader: trc}} + } + if isSeeker { + return &timingReaderSeeker{timingReader: trc} + } + + return &trc } -func (t *timingReadCloser) ObjectSize() (int64, error) { - return t.objSize, t.objSizeErr +func (r *timingReader) ObjectSize() (int64, error) { + return r.objSize, r.objSizeErr } -func (rc *timingReadCloser) Close() error { - err := rc.ReadCloser.Close() - if !rc.alreadyGotErr && err != nil { - rc.failed.WithLabelValues(rc.op).Inc() +func (r *timingReader) Close() error { + var closeErr error + + // Call the wrapped reader if it implements Close(), only if we've been asked to close it. + if closer, ok := r.Reader.(io.Closer); r.closeReader && ok { + closeErr = closer.Close() + + if !r.alreadyGotErr && closeErr != nil { + r.failed.WithLabelValues(r.op).Inc() + r.alreadyGotErr = true + } } - if !rc.alreadyGotErr && err == nil { - rc.duration.WithLabelValues(rc.op).Observe(time.Since(rc.start).Seconds()) - rc.transferredBytes.WithLabelValues(rc.op).Observe(float64(rc.readBytes)) - rc.alreadyGotErr = true + + // Track duration and transferred bytes only if no error occurred. + if !r.alreadyGotErr { + r.duration.WithLabelValues(r.op).Observe(time.Since(r.start).Seconds()) + r.transferredBytes.WithLabelValues(r.op).Observe(float64(r.readBytes)) + + // Trick to tracking metrics multiple times in case Close() gets called again. + r.alreadyGotErr = true } - return err + + return closeErr } -func (rc *timingReadCloser) Read(b []byte) (n int, err error) { - n, err = rc.ReadCloser.Read(b) - rc.fetchedBytes.WithLabelValues(rc.op).Add(float64(n)) - rc.readBytes += int64(n) +func (r *timingReader) Read(b []byte) (n int, err error) { + n, err = r.Reader.Read(b) + if r.fetchedBytes != nil { + r.fetchedBytes.WithLabelValues(r.op).Add(float64(n)) + } + + r.readBytes += int64(n) // Report metric just once. 
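Stepping back from the timingReader hunks above and below, an illustrative sketch that is not part of the patch: once a bucket is wrapped with WrapWithMetrics, the reader handed to the backend's Upload is this timing wrapper, so upload bytes now land in objstore_bucket_operation_transferred_bytes alongside Get/GetRange. Assumes a pre-built objstore.Bucket; names are placeholders.

```go
package example

import (
	"context"
	"strings"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/thanos-io/objstore"
)

// uploadWithMetrics wraps bkt with the metrics bucket and performs one
// upload; the transferred bytes and operation duration are recorded on reg.
func uploadWithMetrics(ctx context.Context, bkt objstore.Bucket, reg prometheus.Registerer) error {
	instrumented := objstore.WrapWithMetrics(bkt, reg, "example")
	return instrumented.Upload(ctx, "demo/object", strings.NewReader("payload"))
}
```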
- if !rc.alreadyGotErr && err != nil && err != io.EOF { - if !rc.isFailureExpected(err) { - rc.failed.WithLabelValues(rc.op).Inc() + if !r.alreadyGotErr && err != nil && err != io.EOF { + if !r.isFailureExpected(err) { + r.failed.WithLabelValues(r.op).Inc() } - rc.alreadyGotErr = true + r.alreadyGotErr = true } return n, err } + +type timingReaderSeeker struct { + timingReader +} + +func (rsc *timingReaderSeeker) Seek(offset int64, whence int) (int64, error) { + return (rsc.Reader).(io.Seeker).Seek(offset, whence) +} + +type timingReaderSeekerReaderAt struct { + timingReaderSeeker +} + +func (rsc *timingReaderSeekerReaderAt) ReadAt(p []byte, off int64) (int, error) { + return (rsc.Reader).(io.ReaderAt).ReadAt(p, off) +} diff --git a/vendor/github.com/thanos-io/objstore/providers/azure/helpers.go b/vendor/github.com/thanos-io/objstore/providers/azure/helpers.go index 7b4a5fbe51f14..846394a08a747 100644 --- a/vendor/github.com/thanos-io/objstore/providers/azure/helpers.go +++ b/vendor/github.com/thanos-io/objstore/providers/azure/helpers.go @@ -63,18 +63,27 @@ func getContainerClient(conf Config) (*container.Client, error) { return containerClient, nil } - // Use MSI for authentication. - msiOpt := &azidentity.ManagedIdentityCredentialOptions{} + // Otherwise use a token credential + var cred azcore.TokenCredential + + // Use Managed Identity Credential if a user assigned ID is set if conf.UserAssignedID != "" { + msiOpt := &azidentity.ManagedIdentityCredentialOptions{} msiOpt.ID = azidentity.ClientID(conf.UserAssignedID) + cred, err = azidentity.NewManagedIdentityCredential(msiOpt) + } else { + // Otherwise use Default Azure Credential + cred, err = azidentity.NewDefaultAzureCredential(nil) } - cred, err := azidentity.NewManagedIdentityCredential(msiOpt) + if err != nil { return nil, err } + containerClient, err := container.NewClient(containerURL, cred, opt) if err != nil { return nil, err } + return containerClient, nil } diff --git a/vendor/github.com/thanos-io/objstore/providers/gcs/gcs.go b/vendor/github.com/thanos-io/objstore/providers/gcs/gcs.go index 5ea45c7e97bda..75c6130374d80 100644 --- a/vendor/github.com/thanos-io/objstore/providers/gcs/gcs.go +++ b/vendor/github.com/thanos-io/objstore/providers/gcs/gcs.go @@ -8,9 +8,11 @@ import ( "context" "fmt" "io" + "net/http" "runtime" "strings" "testing" + "time" "cloud.google.com/go/storage" "github.com/go-kit/log" @@ -19,20 +21,34 @@ import ( "golang.org/x/oauth2/google" "google.golang.org/api/iterator" "google.golang.org/api/option" + htransport "google.golang.org/api/transport/http" + "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "gopkg.in/yaml.v2" "github.com/thanos-io/objstore" + "github.com/thanos-io/objstore/exthttp" ) // DirDelim is the delimiter used to model a directory structure in an object store bucket. const DirDelim = "/" +var DefaultConfig = Config{ + HTTPConfig: exthttp.DefaultHTTPConfig, +} + // Config stores the configuration for gcs bucket. type Config struct { Bucket string `yaml:"bucket"` ServiceAccount string `yaml:"service_account"` + UseGRPC bool `yaml:"use_grpc"` + // GRPCConnPoolSize controls the size of the gRPC connection pool and should only be used + // when direct path is not enabled. + // See https://pkg.go.dev/cloud.google.com/go/storage#hdr-Experimental_gRPC_API for more details + // on how to enable direct path. 
+ GRPCConnPoolSize int `yaml:"grpc_conn_pool_size"` + HTTPConfig exthttp.HTTPConfig `yaml:"http_config"` } // Bucket implements the store.Bucket and shipper.Bucket interfaces against GCS. @@ -44,14 +60,23 @@ type Bucket struct { closer io.Closer } +// parseConfig unmarshals a buffer into a Config with default values. +func parseConfig(conf []byte) (Config, error) { + config := DefaultConfig + if err := yaml.UnmarshalStrict(conf, &config); err != nil { + return Config{}, err + } + + return config, nil +} + // NewBucket returns a new Bucket against the given bucket handle. func NewBucket(ctx context.Context, logger log.Logger, conf []byte, component string) (*Bucket, error) { - var gc Config - if err := yaml.Unmarshal(conf, &gc); err != nil { + config, err := parseConfig(conf) + if err != nil { return nil, err } - - return NewBucketWithConfig(ctx, logger, gc, component) + return NewBucketWithConfig(ctx, logger, config, component) } // NewBucketWithConfig returns a new Bucket with gcs Config struct. @@ -75,7 +100,51 @@ func NewBucketWithConfig(ctx context.Context, logger log.Logger, gc Config, comp option.WithUserAgent(fmt.Sprintf("thanos-%s/%s (%s)", component, version.Version, runtime.Version())), ) - gcsClient, err := storage.NewClient(ctx, opts...) + // Check if a roundtripper has been set in the config + // otherwise build the default transport. + var rt http.RoundTripper + if gc.HTTPConfig.Transport != nil { + rt = gc.HTTPConfig.Transport + } else { + var err error + rt, err = exthttp.DefaultTransport(gc.HTTPConfig) + if err != nil { + return nil, err + } + } + + // GCS uses some defaults when "options.WithHTTPClient" is not used that are important when we call + // htransport.NewTransport namely the scopes that are then used for OAth authentication. So to build our own + // http client we need to se those defaults + opts = append(opts, option.WithScopes(storage.ScopeFullControl, "https://www.googleapis.com/auth/cloud-platform")) + gRT, err := htransport.NewTransport(context.Background(), rt, opts...) + if err != nil { + return nil, err + } + + httpCli := &http.Client{ + Transport: gRT, + Timeout: time.Duration(gc.HTTPConfig.IdleConnTimeout), + } + opts = append(opts, option.WithHTTPClient(httpCli)) + + return newBucket(ctx, logger, gc, opts) +} + +func newBucket(ctx context.Context, logger log.Logger, gc Config, opts []option.ClientOption) (*Bucket, error) { + var ( + err error + gcsClient *storage.Client + ) + if gc.UseGRPC { + opts = append(opts, + option.WithGRPCDialOption(grpc.WithRecvBufferPool(grpc.NewSharedBufferPool())), + option.WithGRPCConnectionPool(gc.GRPCConnPoolSize), + ) + gcsClient, err = storage.NewGRPCClient(ctx, opts...) + } else { + gcsClient, err = storage.NewClient(ctx, opts...) 
+ } if err != nil { return nil, err } @@ -108,10 +177,16 @@ func (b *Bucket) Iter(ctx context.Context, dir string, f func(string) error, opt delimiter = "" } - it := b.bkt.Objects(ctx, &storage.Query{ + query := &storage.Query{ Prefix: dir, Delimiter: delimiter, - }) + } + err := query.SetAttrSelection([]string{"Name"}) + if err != nil { + return err + } + + it := b.bkt.Objects(ctx, query) for { select { case <-ctx.Done(): diff --git a/vendor/github.com/thanos-io/objstore/providers/s3/s3.go b/vendor/github.com/thanos-io/objstore/providers/s3/s3.go index f92d397398738..721300349e1c7 100644 --- a/vendor/github.com/thanos-io/objstore/providers/s3/s3.go +++ b/vendor/github.com/thanos-io/objstore/providers/s3/s3.go @@ -14,7 +14,6 @@ import ( "strconv" "strings" "testing" - "time" "github.com/efficientgo/core/logerrcapture" "github.com/go-kit/log" @@ -23,7 +22,6 @@ import ( "github.com/minio/minio-go/v7/pkg/credentials" "github.com/minio/minio-go/v7/pkg/encrypt" "github.com/pkg/errors" - "github.com/prometheus/common/model" "github.com/prometheus/common/version" "gopkg.in/yaml.v2" @@ -101,18 +99,11 @@ const ( ) var DefaultConfig = Config{ - PutUserMetadata: map[string]string{}, - HTTPConfig: exthttp.HTTPConfig{ - IdleConnTimeout: model.Duration(90 * time.Second), - ResponseHeaderTimeout: model.Duration(2 * time.Minute), - TLSHandshakeTimeout: model.Duration(10 * time.Second), - ExpectContinueTimeout: model.Duration(1 * time.Second), - MaxIdleConns: 100, - MaxIdleConnsPerHost: 100, - MaxConnsPerHost: 0, - }, + PutUserMetadata: map[string]string{}, + HTTPConfig: exthttp.DefaultHTTPConfig, PartSize: 1024 * 1024 * 64, // 64MB. BucketLookupType: AutoLookup, + SendContentMd5: true, // Default to using MD5. } // HTTPConfig exists here only because Cortex depends on it, and we depend on Cortex. @@ -136,6 +127,7 @@ type Config struct { TraceConfig TraceConfig `yaml:"trace"` ListObjectsVersion string `yaml:"list_objects_version"` BucketLookupType BucketLookupType `yaml:"bucket_lookup_type"` + SendContentMd5 bool `yaml:"send_content_md5"` // PartSize used for multipart upload. Only used if uploaded object size is known and larger than configured PartSize. // NOTE we need to make sure this number does not produce more parts than 10 000. PartSize uint64 `yaml:"part_size"` @@ -166,6 +158,7 @@ type Bucket struct { storageClass string partSize uint64 listObjectsV1 bool + sendContentMd5 bool } // parseConfig unmarshals a buffer into a Config with default values. @@ -334,6 +327,7 @@ func NewBucketWithConfig(logger log.Logger, config Config, component string) (*B storageClass: storageClass, partSize: config.PartSize, listObjectsV1: config.ListObjectsVersion == "v1", + sendContentMd5: config.SendContentMd5, } return bkt, nil } @@ -492,6 +486,13 @@ func (b *Bucket) Upload(ctx context.Context, name string, r io.Reader) error { if size < int64(partSize) { partSize = 0 } + + // Cloning map since minio may modify it + userMetadata := make(map[string]string, len(b.putUserMetadata)) + for k, v := range b.putUserMetadata { + userMetadata[k] = v + } + if _, err := b.client.PutObject( ctx, b.name, @@ -501,8 +502,9 @@ func (b *Bucket) Upload(ctx context.Context, name string, r io.Reader) error { minio.PutObjectOptions{ PartSize: partSize, ServerSideEncryption: sse, - UserMetadata: b.putUserMetadata, + UserMetadata: userMetadata, StorageClass: b.storageClass, + SendContentMd5: b.sendContentMd5, // 4 is what minio-go have as the default. 
To be certain we do micro benchmark before any changes we // ensure we pin this number to four. // TODO(bwplotka): Consider adjusting this number to GOMAXPROCS or to expose this in config if it becomes bottleneck. diff --git a/vendor/github.com/thanos-io/objstore/providers/swift/swift.go b/vendor/github.com/thanos-io/objstore/providers/swift/swift.go index 9bfa3cf85107a..a8c56c55884d2 100644 --- a/vendor/github.com/thanos-io/objstore/providers/swift/swift.go +++ b/vendor/github.com/thanos-io/objstore/providers/swift/swift.go @@ -8,6 +8,7 @@ import ( "context" "fmt" "io" + "net/http" "os" "strconv" "strings" @@ -21,6 +22,7 @@ import ( "github.com/pkg/errors" "github.com/prometheus/common/model" "github.com/thanos-io/objstore" + "github.com/thanos-io/objstore/exthttp" "gopkg.in/yaml.v2" ) @@ -37,33 +39,35 @@ var DefaultConfig = Config{ Retries: 3, ConnectTimeout: model.Duration(10 * time.Second), Timeout: model.Duration(5 * time.Minute), + HTTPConfig: exthttp.DefaultHTTPConfig, } type Config struct { - AuthVersion int `yaml:"auth_version"` - AuthUrl string `yaml:"auth_url"` - Username string `yaml:"username"` - UserDomainName string `yaml:"user_domain_name"` - UserDomainID string `yaml:"user_domain_id"` - UserId string `yaml:"user_id"` - Password string `yaml:"password"` - DomainId string `yaml:"domain_id"` - DomainName string `yaml:"domain_name"` - ApplicationCredentialID string `yaml:"application_credential_id"` - ApplicationCredentialName string `yaml:"application_credential_name"` - ApplicationCredentialSecret string `yaml:"application_credential_secret"` - ProjectID string `yaml:"project_id"` - ProjectName string `yaml:"project_name"` - ProjectDomainID string `yaml:"project_domain_id"` - ProjectDomainName string `yaml:"project_domain_name"` - RegionName string `yaml:"region_name"` - ContainerName string `yaml:"container_name"` - ChunkSize int64 `yaml:"large_object_chunk_size"` - SegmentContainerName string `yaml:"large_object_segments_container_name"` - Retries int `yaml:"retries"` - ConnectTimeout model.Duration `yaml:"connect_timeout"` - Timeout model.Duration `yaml:"timeout"` - UseDynamicLargeObjects bool `yaml:"use_dynamic_large_objects"` + AuthVersion int `yaml:"auth_version"` + AuthUrl string `yaml:"auth_url"` + Username string `yaml:"username"` + UserDomainName string `yaml:"user_domain_name"` + UserDomainID string `yaml:"user_domain_id"` + UserId string `yaml:"user_id"` + Password string `yaml:"password"` + DomainId string `yaml:"domain_id"` + DomainName string `yaml:"domain_name"` + ApplicationCredentialID string `yaml:"application_credential_id"` + ApplicationCredentialName string `yaml:"application_credential_name"` + ApplicationCredentialSecret string `yaml:"application_credential_secret"` + ProjectID string `yaml:"project_id"` + ProjectName string `yaml:"project_name"` + ProjectDomainID string `yaml:"project_domain_id"` + ProjectDomainName string `yaml:"project_domain_name"` + RegionName string `yaml:"region_name"` + ContainerName string `yaml:"container_name"` + ChunkSize int64 `yaml:"large_object_chunk_size"` + SegmentContainerName string `yaml:"large_object_segments_container_name"` + Retries int `yaml:"retries"` + ConnectTimeout model.Duration `yaml:"connect_timeout"` + Timeout model.Duration `yaml:"timeout"` + UseDynamicLargeObjects bool `yaml:"use_dynamic_large_objects"` + HTTPConfig exthttp.HTTPConfig `yaml:"http_config"` } func parseConfig(conf []byte) (*Config, error) { @@ -101,6 +105,7 @@ func configFromEnv() (*Config, error) { ConnectTimeout: 
model.Duration(c.ConnectTimeout), Timeout: model.Duration(c.Timeout), UseDynamicLargeObjects: false, + HTTPConfig: DefaultConfig.HTTPConfig, } if os.Getenv("SWIFT_CHUNK_SIZE") != "" { var err error @@ -115,7 +120,7 @@ func configFromEnv() (*Config, error) { return &config, nil } -func connectionFromConfig(sc *Config) *swift.Connection { +func connectionFromConfig(sc *Config, rt http.RoundTripper) *swift.Connection { connection := swift.Connection{ AuthVersion: sc.AuthVersion, AuthUrl: sc.AuthUrl, @@ -135,6 +140,7 @@ func connectionFromConfig(sc *Config) *swift.Connection { Retries: sc.Retries, ConnectTimeout: time.Duration(sc.ConnectTimeout), Timeout: time.Duration(sc.Timeout), + Transport: rt, } return &connection } @@ -173,7 +179,21 @@ func ensureContainer(connection *swift.Connection, name string, createIfNotExist } func NewContainerFromConfig(logger log.Logger, sc *Config, createContainer bool) (*Container, error) { - connection := connectionFromConfig(sc) + + // Check if a roundtripper has been set in the config + // otherwise build the default transport. + var rt http.RoundTripper + if sc.HTTPConfig.Transport != nil { + rt = sc.HTTPConfig.Transport + } else { + var err error + rt, err = exthttp.DefaultTransport(sc.HTTPConfig) + if err != nil { + return nil, err + } + } + + connection := connectionFromConfig(sc, rt) if err := connection.Authenticate(); err != nil { return nil, errors.Wrap(err, "authentication") } diff --git a/vendor/github.com/thanos-io/objstore/tracing/opentracing/opentracing.go b/vendor/github.com/thanos-io/objstore/tracing/opentracing/opentracing.go index 8b99e304ddf59..0a26ceeb66f50 100644 --- a/vendor/github.com/thanos-io/objstore/tracing/opentracing/opentracing.go +++ b/vendor/github.com/thanos-io/objstore/tracing/opentracing/opentracing.go @@ -209,8 +209,8 @@ func startSpan(ctx context.Context, operationName string, opts ...opentracing.St // doWithSpan executes function doFn inside new span with `operationName` name and hooking as child to a span found within given context if any. // It uses opentracing.Tracer propagated in context. If no found, it uses noop tracer notification. -func doWithSpan(ctx context.Context, operationName string, doFn func(context.Context, Span), opts ...opentracing.StartSpanOption) { - span, newCtx := startSpan(ctx, operationName, opts...) +func doWithSpan(ctx context.Context, operationName string, doFn func(context.Context, Span), _ ...opentracing.StartSpanOption) { + span, newCtx := startSpan(ctx, operationName) defer span.Finish() doFn(newCtx, span) } diff --git a/vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go b/vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go deleted file mode 100644 index d33c8890fc53e..0000000000000 --- a/vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.13 - -package poly1305 - -// Generic fallbacks for the math/bits intrinsics, copied from -// src/math/bits/bits.go. They were added in Go 1.12, but Add64 and Sum64 had -// variable time fallbacks until Go 1.13. 
- -func bitsAdd64(x, y, carry uint64) (sum, carryOut uint64) { - sum = x + y + carry - carryOut = ((x & y) | ((x | y) &^ sum)) >> 63 - return -} - -func bitsSub64(x, y, borrow uint64) (diff, borrowOut uint64) { - diff = x - y - borrow - borrowOut = ((^x & y) | (^(x ^ y) & diff)) >> 63 - return -} - -func bitsMul64(x, y uint64) (hi, lo uint64) { - const mask32 = 1<<32 - 1 - x0 := x & mask32 - x1 := x >> 32 - y0 := y & mask32 - y1 := y >> 32 - w0 := x0 * y0 - t := x1*y0 + w0>>32 - w1 := t & mask32 - w2 := t >> 32 - w1 += x0 * y1 - hi = x1*y1 + w2 + w1>>32 - lo = x * y - return -} diff --git a/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go b/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go deleted file mode 100644 index 495c1fa69725b..0000000000000 --- a/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.13 - -package poly1305 - -import "math/bits" - -func bitsAdd64(x, y, carry uint64) (sum, carryOut uint64) { - return bits.Add64(x, y, carry) -} - -func bitsSub64(x, y, borrow uint64) (diff, borrowOut uint64) { - return bits.Sub64(x, y, borrow) -} - -func bitsMul64(x, y uint64) (hi, lo uint64) { - return bits.Mul64(x, y) -} diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go b/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go index e041da5ea3e7d..ec2202bd7d5f6 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go @@ -7,7 +7,10 @@ package poly1305 -import "encoding/binary" +import ( + "encoding/binary" + "math/bits" +) // Poly1305 [RFC 7539] is a relatively simple algorithm: the authentication tag // for a 64 bytes message is approximately @@ -114,13 +117,13 @@ type uint128 struct { } func mul64(a, b uint64) uint128 { - hi, lo := bitsMul64(a, b) + hi, lo := bits.Mul64(a, b) return uint128{lo, hi} } func add128(a, b uint128) uint128 { - lo, c := bitsAdd64(a.lo, b.lo, 0) - hi, c := bitsAdd64(a.hi, b.hi, c) + lo, c := bits.Add64(a.lo, b.lo, 0) + hi, c := bits.Add64(a.hi, b.hi, c) if c != 0 { panic("poly1305: unexpected overflow") } @@ -155,8 +158,8 @@ func updateGeneric(state *macState, msg []byte) { // hide leading zeroes. For full chunks, that's 1 << 128, so we can just // add 1 to the most significant (2¹²⁸) limb, h2. 
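The poly1305 hunks above and below replace the package-local bitsAdd64/bitsSub64/bitsMul64 shims with the math/bits intrinsics they wrapped, now that the pre-Go 1.13 fallbacks are no longer needed. A minimal standalone sketch of the intrinsics' semantics, not part of the patch:

```go
package main

import (
	"fmt"
	"math/bits"
)

func main() {
	// bits.Add64 returns the low 64 bits of the sum plus a carry-out bit,
	// which is what the removed bitsAdd64 fallback computed by hand.
	sum, carry := bits.Add64(1<<63, 1<<63, 0)
	fmt.Println(sum, carry) // 0 1

	// bits.Mul64 returns the full 128-bit product split into hi and lo
	// words, matching the removed bitsMul64 fallback.
	hi, lo := bits.Mul64(1<<32, 1<<32)
	fmt.Println(hi, lo) // 1 0
}
```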
if len(msg) >= TagSize { - h0, c = bitsAdd64(h0, binary.LittleEndian.Uint64(msg[0:8]), 0) - h1, c = bitsAdd64(h1, binary.LittleEndian.Uint64(msg[8:16]), c) + h0, c = bits.Add64(h0, binary.LittleEndian.Uint64(msg[0:8]), 0) + h1, c = bits.Add64(h1, binary.LittleEndian.Uint64(msg[8:16]), c) h2 += c + 1 msg = msg[TagSize:] @@ -165,8 +168,8 @@ func updateGeneric(state *macState, msg []byte) { copy(buf[:], msg) buf[len(msg)] = 1 - h0, c = bitsAdd64(h0, binary.LittleEndian.Uint64(buf[0:8]), 0) - h1, c = bitsAdd64(h1, binary.LittleEndian.Uint64(buf[8:16]), c) + h0, c = bits.Add64(h0, binary.LittleEndian.Uint64(buf[0:8]), 0) + h1, c = bits.Add64(h1, binary.LittleEndian.Uint64(buf[8:16]), c) h2 += c msg = nil @@ -219,9 +222,9 @@ func updateGeneric(state *macState, msg []byte) { m3 := h2r1 t0 := m0.lo - t1, c := bitsAdd64(m1.lo, m0.hi, 0) - t2, c := bitsAdd64(m2.lo, m1.hi, c) - t3, _ := bitsAdd64(m3.lo, m2.hi, c) + t1, c := bits.Add64(m1.lo, m0.hi, 0) + t2, c := bits.Add64(m2.lo, m1.hi, c) + t3, _ := bits.Add64(m3.lo, m2.hi, c) // Now we have the result as 4 64-bit limbs, and we need to reduce it // modulo 2¹³⁰ - 5. The special shape of this Crandall prime lets us do @@ -243,14 +246,14 @@ func updateGeneric(state *macState, msg []byte) { // To add c * 5 to h, we first add cc = c * 4, and then add (cc >> 2) = c. - h0, c = bitsAdd64(h0, cc.lo, 0) - h1, c = bitsAdd64(h1, cc.hi, c) + h0, c = bits.Add64(h0, cc.lo, 0) + h1, c = bits.Add64(h1, cc.hi, c) h2 += c cc = shiftRightBy2(cc) - h0, c = bitsAdd64(h0, cc.lo, 0) - h1, c = bitsAdd64(h1, cc.hi, c) + h0, c = bits.Add64(h0, cc.lo, 0) + h1, c = bits.Add64(h1, cc.hi, c) h2 += c // h2 is at most 3 + 1 + 1 = 5, making the whole of h at most @@ -287,9 +290,9 @@ func finalize(out *[TagSize]byte, h *[3]uint64, s *[2]uint64) { // in constant time, we compute t = h - (2¹³⁰ - 5), and select h as the // result if the subtraction underflows, and t otherwise. - hMinusP0, b := bitsSub64(h0, p0, 0) - hMinusP1, b := bitsSub64(h1, p1, b) - _, b = bitsSub64(h2, p2, b) + hMinusP0, b := bits.Sub64(h0, p0, 0) + hMinusP1, b := bits.Sub64(h1, p1, b) + _, b = bits.Sub64(h2, p2, b) // h = h if h < p else h - p h0 = select64(b, h0, hMinusP0) @@ -301,8 +304,8 @@ func finalize(out *[TagSize]byte, h *[3]uint64, s *[2]uint64) { // // by just doing a wide addition with the 128 low bits of h and discarding // the overflow. - h0, c := bitsAdd64(h0, s[0], 0) - h1, _ = bitsAdd64(h1, s[1], c) + h0, c := bits.Add64(h0, s[0], 0) + h1, _ = bits.Add64(h1, s[1], c) binary.LittleEndian.PutUint64(out[0:8], h0) binary.LittleEndian.PutUint64(out[8:16], h1) diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go index 2cb9c408f2e78..0c1b8679376ae 100644 --- a/vendor/golang.org/x/net/context/go17.go +++ b/vendor/golang.org/x/net/context/go17.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build go1.7 -// +build go1.7 package context diff --git a/vendor/golang.org/x/net/context/go19.go b/vendor/golang.org/x/net/context/go19.go index 64d31ecc3ef43..e31e35a9045b7 100644 --- a/vendor/golang.org/x/net/context/go19.go +++ b/vendor/golang.org/x/net/context/go19.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build go1.9 -// +build go1.9 package context diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go index 7b6b685114a9b..065ff3dfa5254 100644 --- a/vendor/golang.org/x/net/context/pre_go17.go +++ b/vendor/golang.org/x/net/context/pre_go17.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !go1.7 -// +build !go1.7 package context diff --git a/vendor/golang.org/x/net/context/pre_go19.go b/vendor/golang.org/x/net/context/pre_go19.go index 1f9715341faac..ec5a638033588 100644 --- a/vendor/golang.org/x/net/context/pre_go19.go +++ b/vendor/golang.org/x/net/context/pre_go19.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !go1.9 -// +build !go1.9 package context diff --git a/vendor/golang.org/x/net/http2/databuffer.go b/vendor/golang.org/x/net/http2/databuffer.go index a3067f8de7413..e6f55cbd163a3 100644 --- a/vendor/golang.org/x/net/http2/databuffer.go +++ b/vendor/golang.org/x/net/http2/databuffer.go @@ -20,41 +20,44 @@ import ( // TODO: Benchmark to determine if the pools are necessary. The GC may have // improved enough that we can instead allocate chunks like this: // make([]byte, max(16<<10, expectedBytesRemaining)) -var ( - dataChunkSizeClasses = []int{ - 1 << 10, - 2 << 10, - 4 << 10, - 8 << 10, - 16 << 10, - } - dataChunkPools = [...]sync.Pool{ - {New: func() interface{} { return make([]byte, 1<<10) }}, - {New: func() interface{} { return make([]byte, 2<<10) }}, - {New: func() interface{} { return make([]byte, 4<<10) }}, - {New: func() interface{} { return make([]byte, 8<<10) }}, - {New: func() interface{} { return make([]byte, 16<<10) }}, - } -) +var dataChunkPools = [...]sync.Pool{ + {New: func() interface{} { return new([1 << 10]byte) }}, + {New: func() interface{} { return new([2 << 10]byte) }}, + {New: func() interface{} { return new([4 << 10]byte) }}, + {New: func() interface{} { return new([8 << 10]byte) }}, + {New: func() interface{} { return new([16 << 10]byte) }}, +} func getDataBufferChunk(size int64) []byte { - i := 0 - for ; i < len(dataChunkSizeClasses)-1; i++ { - if size <= int64(dataChunkSizeClasses[i]) { - break - } + switch { + case size <= 1<<10: + return dataChunkPools[0].Get().(*[1 << 10]byte)[:] + case size <= 2<<10: + return dataChunkPools[1].Get().(*[2 << 10]byte)[:] + case size <= 4<<10: + return dataChunkPools[2].Get().(*[4 << 10]byte)[:] + case size <= 8<<10: + return dataChunkPools[3].Get().(*[8 << 10]byte)[:] + default: + return dataChunkPools[4].Get().(*[16 << 10]byte)[:] } - return dataChunkPools[i].Get().([]byte) } func putDataBufferChunk(p []byte) { - for i, n := range dataChunkSizeClasses { - if len(p) == n { - dataChunkPools[i].Put(p) - return - } + switch len(p) { + case 1 << 10: + dataChunkPools[0].Put((*[1 << 10]byte)(p)) + case 2 << 10: + dataChunkPools[1].Put((*[2 << 10]byte)(p)) + case 4 << 10: + dataChunkPools[2].Put((*[4 << 10]byte)(p)) + case 8 << 10: + dataChunkPools[3].Put((*[8 << 10]byte)(p)) + case 16 << 10: + dataChunkPools[4].Put((*[16 << 10]byte)(p)) + default: + panic(fmt.Sprintf("unexpected buffer len=%v", len(p))) } - panic(fmt.Sprintf("unexpected buffer len=%v", len(p))) } // dataBuffer is an io.ReadWriter backed by a list of data chunks. 
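The databuffer.go hunk above switches the chunk pools from storing []byte values to pointers to fixed-size arrays: a pointer fits directly in an interface value, whereas putting a slice header into a sync.Pool forces an extra heap allocation on every Put. A generic sketch of the same pattern under illustrative names, not part of the patch:

```go
package main

import (
	"fmt"
	"sync"
)

// chunkPool hands out 4 KiB buffers. Storing *[4096]byte keeps Get/Put
// allocation-free; the slice-to-array-pointer conversion in putChunk
// requires Go 1.17+, same as the hunk above.
var chunkPool = sync.Pool{
	New: func() interface{} { return new([4096]byte) },
}

func getChunk() []byte  { return chunkPool.Get().(*[4096]byte)[:] }
func putChunk(p []byte) { chunkPool.Put((*[4096]byte)(p)) }

func main() {
	buf := getChunk()
	copy(buf, "hello")
	fmt.Println(string(buf[:5]))
	putChunk(buf)
}
```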
diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index c1f6b90dc32f6..e2b298d859344 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -1510,13 +1510,12 @@ func (mh *MetaHeadersFrame) checkPseudos() error { } func (fr *Framer) maxHeaderStringLen() int { - v := fr.maxHeaderListSize() - if uint32(int(v)) == v { - return int(v) + v := int(fr.maxHeaderListSize()) + if v < 0 { + // If maxHeaderListSize overflows an int, use no limit (0). + return 0 } - // They had a crazy big number for MaxHeaderBytes anyway, - // so give them unlimited header lengths: - return 0 + return v } // readMetaFrame returns 0 or more CONTINUATION frames from fr and diff --git a/vendor/golang.org/x/net/http2/go111.go b/vendor/golang.org/x/net/http2/go111.go deleted file mode 100644 index 5bf62b032ec51..0000000000000 --- a/vendor/golang.org/x/net/http2/go111.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.11 -// +build go1.11 - -package http2 - -import ( - "net/http/httptrace" - "net/textproto" -) - -func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { - return trace != nil && trace.WroteHeaderField != nil -} - -func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) { - if trace != nil && trace.WroteHeaderField != nil { - trace.WroteHeaderField(k, []string{v}) - } -} - -func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { - if trace != nil { - return trace.Got1xxResponse - } - return nil -} diff --git a/vendor/golang.org/x/net/http2/go115.go b/vendor/golang.org/x/net/http2/go115.go deleted file mode 100644 index 908af1ab93c5c..0000000000000 --- a/vendor/golang.org/x/net/http2/go115.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.15 -// +build go1.15 - -package http2 - -import ( - "context" - "crypto/tls" -) - -// dialTLSWithContext uses tls.Dialer, added in Go 1.15, to open a TLS -// connection. -func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) { - dialer := &tls.Dialer{ - Config: cfg, - } - cn, err := dialer.DialContext(ctx, network, addr) - if err != nil { - return nil, err - } - tlsCn := cn.(*tls.Conn) // DialContext comment promises this will always succeed - return tlsCn, nil -} diff --git a/vendor/golang.org/x/net/http2/go118.go b/vendor/golang.org/x/net/http2/go118.go deleted file mode 100644 index aca4b2b31acdb..0000000000000 --- a/vendor/golang.org/x/net/http2/go118.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.18 -// +build go1.18 - -package http2 - -import ( - "crypto/tls" - "net" -) - -func tlsUnderlyingConn(tc *tls.Conn) net.Conn { - return tc.NetConn() -} diff --git a/vendor/golang.org/x/net/http2/not_go111.go b/vendor/golang.org/x/net/http2/not_go111.go deleted file mode 100644 index cc0baa8197fec..0000000000000 --- a/vendor/golang.org/x/net/http2/not_go111.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.11 -// +build !go1.11 - -package http2 - -import ( - "net/http/httptrace" - "net/textproto" -) - -func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { return false } - -func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {} - -func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { - return nil -} diff --git a/vendor/golang.org/x/net/http2/not_go115.go b/vendor/golang.org/x/net/http2/not_go115.go deleted file mode 100644 index e6c04cf7ac75c..0000000000000 --- a/vendor/golang.org/x/net/http2/not_go115.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.15 -// +build !go1.15 - -package http2 - -import ( - "context" - "crypto/tls" -) - -// dialTLSWithContext opens a TLS connection. -func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) { - cn, err := tls.Dial(network, addr, cfg) - if err != nil { - return nil, err - } - if err := cn.Handshake(); err != nil { - return nil, err - } - if cfg.InsecureSkipVerify { - return cn, nil - } - if err := cn.VerifyHostname(cfg.ServerName); err != nil { - return nil, err - } - return cn, nil -} diff --git a/vendor/golang.org/x/net/http2/not_go118.go b/vendor/golang.org/x/net/http2/not_go118.go deleted file mode 100644 index eab532c96bc08..0000000000000 --- a/vendor/golang.org/x/net/http2/not_go118.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.18 -// +build !go1.18 - -package http2 - -import ( - "crypto/tls" - "net" -) - -func tlsUnderlyingConn(tc *tls.Conn) net.Conn { - return nil -} diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index 02c88b6b3e1f3..ae94c6408d5dc 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -2549,7 +2549,6 @@ type responseWriterState struct { wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet. sentHeader bool // have we sent the header frame? handlerDone bool // handler has finished - dirty bool // a Write failed; don't reuse this responseWriterState sentContentLen int64 // non-zero if handler set a Content-Length header wroteBytes int64 @@ -2669,7 +2668,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { date: date, }) if err != nil { - rws.dirty = true return 0, err } if endStream { @@ -2690,7 +2688,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { if len(p) > 0 || endStream { // only send a 0 byte DATA frame if we're ending the stream. 
if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil { - rws.dirty = true return 0, err } } @@ -2702,9 +2699,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { trailers: rws.trailers, endStream: true, }) - if err != nil { - rws.dirty = true - } return len(p), err } return len(p), nil @@ -2920,14 +2914,12 @@ func (rws *responseWriterState) writeHeader(code int) { h.Del("Transfer-Encoding") } - if rws.conn.writeHeaders(rws.stream, &writeResHeaders{ + rws.conn.writeHeaders(rws.stream, &writeResHeaders{ streamID: rws.stream.id, httpResCode: code, h: h, endStream: rws.handlerDone && !rws.hasTrailers(), - }) != nil { - rws.dirty = true - } + }) return } @@ -2992,19 +2984,10 @@ func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, func (w *responseWriter) handlerDone() { rws := w.rws - dirty := rws.dirty rws.handlerDone = true w.Flush() w.rws = nil - if !dirty { - // Only recycle the pool if all prior Write calls to - // the serverConn goroutine completed successfully. If - // they returned earlier due to resets from the peer - // there might still be write goroutines outstanding - // from the serverConn referencing the rws memory. See - // issue 20704. - responseWriterStatePool.Put(rws) - } + responseWriterStatePool.Put(rws) } // Push errors. @@ -3187,6 +3170,7 @@ func (sc *serverConn) startPush(msg *startPushRequest) { panic(fmt.Sprintf("newWriterAndRequestNoBody(%+v): %v", msg.url, err)) } + sc.curHandlers++ go sc.runHandler(rw, req, sc.handler.ServeHTTP) return promisedID, nil } diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index 4515b22c4a160..df578b86c650a 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -1018,7 +1018,7 @@ func (cc *ClientConn) forceCloseConn() { if !ok { return } - if nc := tlsUnderlyingConn(tc); nc != nil { + if nc := tc.NetConn(); nc != nil { nc.Close() } } @@ -3201,3 +3201,34 @@ func traceFirstResponseByte(trace *httptrace.ClientTrace) { trace.GotFirstResponseByte() } } + +func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { + return trace != nil && trace.WroteHeaderField != nil +} + +func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) { + if trace != nil && trace.WroteHeaderField != nil { + trace.WroteHeaderField(k, []string{v}) + } +} + +func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { + if trace != nil { + return trace.Got1xxResponse + } + return nil +} + +// dialTLSWithContext uses tls.Dialer, added in Go 1.15, to open a TLS +// connection. +func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) { + dialer := &tls.Dialer{ + Config: cfg, + } + cn, err := dialer.DialContext(ctx, network, addr) + if err != nil { + return nil, err + } + tlsCn := cn.(*tls.Conn) // DialContext comment promises this will always succeed + return tlsCn, nil +} diff --git a/vendor/golang.org/x/net/idna/go118.go b/vendor/golang.org/x/net/idna/go118.go index c5c4338dbed47..712f1ad839f27 100644 --- a/vendor/golang.org/x/net/idna/go118.go +++ b/vendor/golang.org/x/net/idna/go118.go @@ -5,7 +5,6 @@ // license that can be found in the LICENSE file. 
//go:build go1.18 -// +build go1.18 package idna diff --git a/vendor/golang.org/x/net/idna/idna10.0.0.go b/vendor/golang.org/x/net/idna/idna10.0.0.go index 64ccf85febb66..7b371788473a5 100644 --- a/vendor/golang.org/x/net/idna/idna10.0.0.go +++ b/vendor/golang.org/x/net/idna/idna10.0.0.go @@ -5,7 +5,6 @@ // license that can be found in the LICENSE file. //go:build go1.10 -// +build go1.10 // Package idna implements IDNA2008 using the compatibility processing // defined by UTS (Unicode Technical Standard) #46, which defines a standard to diff --git a/vendor/golang.org/x/net/idna/idna9.0.0.go b/vendor/golang.org/x/net/idna/idna9.0.0.go index ee1698cefbd8f..cc6a892a4a3cb 100644 --- a/vendor/golang.org/x/net/idna/idna9.0.0.go +++ b/vendor/golang.org/x/net/idna/idna9.0.0.go @@ -5,7 +5,6 @@ // license that can be found in the LICENSE file. //go:build !go1.10 -// +build !go1.10 // Package idna implements IDNA2008 using the compatibility processing // defined by UTS (Unicode Technical Standard) #46, which defines a standard to diff --git a/vendor/golang.org/x/net/idna/pre_go118.go b/vendor/golang.org/x/net/idna/pre_go118.go index 3aaccab1c5a0e..40e74bb3d2ac1 100644 --- a/vendor/golang.org/x/net/idna/pre_go118.go +++ b/vendor/golang.org/x/net/idna/pre_go118.go @@ -5,7 +5,6 @@ // license that can be found in the LICENSE file. //go:build !go1.18 -// +build !go1.18 package idna diff --git a/vendor/golang.org/x/net/idna/tables10.0.0.go b/vendor/golang.org/x/net/idna/tables10.0.0.go index d1d62ef459bb7..c6c2bf10a60a1 100644 --- a/vendor/golang.org/x/net/idna/tables10.0.0.go +++ b/vendor/golang.org/x/net/idna/tables10.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.10 && !go1.13 -// +build go1.10,!go1.13 package idna diff --git a/vendor/golang.org/x/net/idna/tables11.0.0.go b/vendor/golang.org/x/net/idna/tables11.0.0.go index 167efba71256c..76789393cc0c7 100644 --- a/vendor/golang.org/x/net/idna/tables11.0.0.go +++ b/vendor/golang.org/x/net/idna/tables11.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.13 && !go1.14 -// +build go1.13,!go1.14 package idna diff --git a/vendor/golang.org/x/net/idna/tables12.0.0.go b/vendor/golang.org/x/net/idna/tables12.0.0.go index ab40f7bcc3b81..0600cd2ae547f 100644 --- a/vendor/golang.org/x/net/idna/tables12.0.0.go +++ b/vendor/golang.org/x/net/idna/tables12.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.14 && !go1.16 -// +build go1.14,!go1.16 package idna diff --git a/vendor/golang.org/x/net/idna/tables13.0.0.go b/vendor/golang.org/x/net/idna/tables13.0.0.go index 66701eadfb37c..2fb768ef6d9a0 100644 --- a/vendor/golang.org/x/net/idna/tables13.0.0.go +++ b/vendor/golang.org/x/net/idna/tables13.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.16 && !go1.21 -// +build go1.16,!go1.21 package idna diff --git a/vendor/golang.org/x/net/idna/tables15.0.0.go b/vendor/golang.org/x/net/idna/tables15.0.0.go index 40033778f01f3..5ff05fe1afca7 100644 --- a/vendor/golang.org/x/net/idna/tables15.0.0.go +++ b/vendor/golang.org/x/net/idna/tables15.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. 
//go:build go1.21 -// +build go1.21 package idna diff --git a/vendor/golang.org/x/net/idna/tables9.0.0.go b/vendor/golang.org/x/net/idna/tables9.0.0.go index 4074b5332e3e3..0f25e84ca2076 100644 --- a/vendor/golang.org/x/net/idna/tables9.0.0.go +++ b/vendor/golang.org/x/net/idna/tables9.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build !go1.10 -// +build !go1.10 package idna diff --git a/vendor/golang.org/x/net/idna/trie12.0.0.go b/vendor/golang.org/x/net/idna/trie12.0.0.go index bb63f904b379a..8a75b9667334e 100644 --- a/vendor/golang.org/x/net/idna/trie12.0.0.go +++ b/vendor/golang.org/x/net/idna/trie12.0.0.go @@ -5,7 +5,6 @@ // license that can be found in the LICENSE file. //go:build !go1.16 -// +build !go1.16 package idna diff --git a/vendor/golang.org/x/net/idna/trie13.0.0.go b/vendor/golang.org/x/net/idna/trie13.0.0.go index 7d68a8dc13cbb..fa45bb90745be 100644 --- a/vendor/golang.org/x/net/idna/trie13.0.0.go +++ b/vendor/golang.org/x/net/idna/trie13.0.0.go @@ -5,7 +5,6 @@ // license that can be found in the LICENSE file. //go:build go1.16 -// +build go1.16 package idna diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr.go b/vendor/golang.org/x/net/internal/socket/cmsghdr.go index 4bdaaaf1ad41c..33a5bf59c32f1 100644 --- a/vendor/golang.org/x/net/internal/socket/cmsghdr.go +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package socket diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr_bsd.go b/vendor/golang.org/x/net/internal/socket/cmsghdr_bsd.go index 0d30e0a0f2eae..68f438c845542 100644 --- a/vendor/golang.org/x/net/internal/socket/cmsghdr_bsd.go +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr_bsd.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || netbsd || openbsd -// +build aix darwin dragonfly freebsd netbsd openbsd package socket diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_32bit.go b/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_32bit.go index 4936e8a6f3b59..058ea8de89aee 100644 --- a/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_32bit.go +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_32bit.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (arm || mips || mipsle || 386 || ppc) && linux -// +build arm mips mipsle 386 ppc -// +build linux package socket diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_64bit.go b/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_64bit.go index f6877f98fdfcb..3ca0d3a0ab907 100644 --- a/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_64bit.go +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_64bit.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build (arm64 || amd64 || loong64 || ppc64 || ppc64le || mips64 || mips64le || riscv64 || s390x) && linux -// +build arm64 amd64 loong64 ppc64 ppc64le mips64 mips64le riscv64 s390x -// +build linux package socket diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr_solaris_64bit.go b/vendor/golang.org/x/net/internal/socket/cmsghdr_solaris_64bit.go index d3dbe1b8e015a..6d0e426cddb8e 100644 --- a/vendor/golang.org/x/net/internal/socket/cmsghdr_solaris_64bit.go +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr_solaris_64bit.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && solaris -// +build amd64,solaris package socket diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr_stub.go b/vendor/golang.org/x/net/internal/socket/cmsghdr_stub.go index 1d9f2ed625bc6..7ca9cb7e782af 100644 --- a/vendor/golang.org/x/net/internal/socket/cmsghdr_stub.go +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr_stub.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !zos -// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!zos package socket diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr_unix.go b/vendor/golang.org/x/net/internal/socket/cmsghdr_unix.go index 19d46789de4da..0211f225bf8fe 100644 --- a/vendor/golang.org/x/net/internal/socket/cmsghdr_unix.go +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr_unix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package socket diff --git a/vendor/golang.org/x/net/internal/socket/complete_dontwait.go b/vendor/golang.org/x/net/internal/socket/complete_dontwait.go index 5b1d50ae72575..2038f290433dd 100644 --- a/vendor/golang.org/x/net/internal/socket/complete_dontwait.go +++ b/vendor/golang.org/x/net/internal/socket/complete_dontwait.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris -// +build darwin dragonfly freebsd linux netbsd openbsd solaris package socket diff --git a/vendor/golang.org/x/net/internal/socket/complete_nodontwait.go b/vendor/golang.org/x/net/internal/socket/complete_nodontwait.go index be6340958351d..70e6f448b049a 100644 --- a/vendor/golang.org/x/net/internal/socket/complete_nodontwait.go +++ b/vendor/golang.org/x/net/internal/socket/complete_nodontwait.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || windows || zos -// +build aix windows zos package socket diff --git a/vendor/golang.org/x/net/internal/socket/empty.s b/vendor/golang.org/x/net/internal/socket/empty.s index 90ab4ca3d8e6f..49d79791e01bb 100644 --- a/vendor/golang.org/x/net/internal/socket/empty.s +++ b/vendor/golang.org/x/net/internal/socket/empty.s @@ -3,6 +3,5 @@ // license that can be found in the LICENSE file. //go:build darwin && go1.12 -// +build darwin,go1.12 // This exists solely so we can linkname in symbols from syscall. 
diff --git a/vendor/golang.org/x/net/internal/socket/error_unix.go b/vendor/golang.org/x/net/internal/socket/error_unix.go index 78f4129047e27..7a5cc5c43e10c 100644 --- a/vendor/golang.org/x/net/internal/socket/error_unix.go +++ b/vendor/golang.org/x/net/internal/socket/error_unix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package socket diff --git a/vendor/golang.org/x/net/internal/socket/iovec_32bit.go b/vendor/golang.org/x/net/internal/socket/iovec_32bit.go index 2b8fbb3f3d080..340e53fbdabdc 100644 --- a/vendor/golang.org/x/net/internal/socket/iovec_32bit.go +++ b/vendor/golang.org/x/net/internal/socket/iovec_32bit.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (arm || mips || mipsle || 386 || ppc) && (darwin || dragonfly || freebsd || linux || netbsd || openbsd) -// +build arm mips mipsle 386 ppc -// +build darwin dragonfly freebsd linux netbsd openbsd package socket diff --git a/vendor/golang.org/x/net/internal/socket/iovec_64bit.go b/vendor/golang.org/x/net/internal/socket/iovec_64bit.go index 2e94e96f8b9d2..26470c191a261 100644 --- a/vendor/golang.org/x/net/internal/socket/iovec_64bit.go +++ b/vendor/golang.org/x/net/internal/socket/iovec_64bit.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (arm64 || amd64 || loong64 || ppc64 || ppc64le || mips64 || mips64le || riscv64 || s390x) && (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || zos) -// +build arm64 amd64 loong64 ppc64 ppc64le mips64 mips64le riscv64 s390x -// +build aix darwin dragonfly freebsd linux netbsd openbsd zos package socket diff --git a/vendor/golang.org/x/net/internal/socket/iovec_solaris_64bit.go b/vendor/golang.org/x/net/internal/socket/iovec_solaris_64bit.go index f7da2bc4d4bcd..8859ce1035229 100644 --- a/vendor/golang.org/x/net/internal/socket/iovec_solaris_64bit.go +++ b/vendor/golang.org/x/net/internal/socket/iovec_solaris_64bit.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && solaris -// +build amd64,solaris package socket diff --git a/vendor/golang.org/x/net/internal/socket/iovec_stub.go b/vendor/golang.org/x/net/internal/socket/iovec_stub.go index 14caf52483eea..da886b0326ff9 100644 --- a/vendor/golang.org/x/net/internal/socket/iovec_stub.go +++ b/vendor/golang.org/x/net/internal/socket/iovec_stub.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !zos -// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!zos package socket diff --git a/vendor/golang.org/x/net/internal/socket/mmsghdr_stub.go b/vendor/golang.org/x/net/internal/socket/mmsghdr_stub.go index 113e773cd531c..4825b21e3e774 100644 --- a/vendor/golang.org/x/net/internal/socket/mmsghdr_stub.go +++ b/vendor/golang.org/x/net/internal/socket/mmsghdr_stub.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build !aix && !linux && !netbsd -// +build !aix,!linux,!netbsd package socket diff --git a/vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go b/vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go index 41883c530c80f..311fd2c78975b 100644 --- a/vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go +++ b/vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || linux || netbsd -// +build aix linux netbsd package socket diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_bsd.go b/vendor/golang.org/x/net/internal/socket/msghdr_bsd.go index 25f6847f99a7c..ebff4f6e05aaf 100644 --- a/vendor/golang.org/x/net/internal/socket/msghdr_bsd.go +++ b/vendor/golang.org/x/net/internal/socket/msghdr_bsd.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || netbsd || openbsd -// +build aix darwin dragonfly freebsd netbsd openbsd package socket diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_bsdvar.go b/vendor/golang.org/x/net/internal/socket/msghdr_bsdvar.go index 5b8e00f1cd4bf..62e6fe8616469 100644 --- a/vendor/golang.org/x/net/internal/socket/msghdr_bsdvar.go +++ b/vendor/golang.org/x/net/internal/socket/msghdr_bsdvar.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || netbsd -// +build aix darwin dragonfly freebsd netbsd package socket diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_linux_32bit.go b/vendor/golang.org/x/net/internal/socket/msghdr_linux_32bit.go index b4658fbaeb800..3dd07250a6062 100644 --- a/vendor/golang.org/x/net/internal/socket/msghdr_linux_32bit.go +++ b/vendor/golang.org/x/net/internal/socket/msghdr_linux_32bit.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (arm || mips || mipsle || 386 || ppc) && linux -// +build arm mips mipsle 386 ppc -// +build linux package socket diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go b/vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go index 42411affad9e4..5af9ddd6ab8f6 100644 --- a/vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go +++ b/vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (arm64 || amd64 || loong64 || ppc64 || ppc64le || mips64 || mips64le || riscv64 || s390x) && linux -// +build arm64 amd64 loong64 ppc64 ppc64le mips64 mips64le riscv64 s390x -// +build linux package socket diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_solaris_64bit.go b/vendor/golang.org/x/net/internal/socket/msghdr_solaris_64bit.go index 3098f5d783def..e212b50f8d93f 100644 --- a/vendor/golang.org/x/net/internal/socket/msghdr_solaris_64bit.go +++ b/vendor/golang.org/x/net/internal/socket/msghdr_solaris_64bit.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && solaris -// +build amd64,solaris package socket diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_stub.go b/vendor/golang.org/x/net/internal/socket/msghdr_stub.go index eb79151f6ae13..e876776459807 100644 --- a/vendor/golang.org/x/net/internal/socket/msghdr_stub.go +++ b/vendor/golang.org/x/net/internal/socket/msghdr_stub.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !zos -// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!zos package socket diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_zos_s390x.go b/vendor/golang.org/x/net/internal/socket/msghdr_zos_s390x.go index 324e9ee7d1ecf..529db68ee306f 100644 --- a/vendor/golang.org/x/net/internal/socket/msghdr_zos_s390x.go +++ b/vendor/golang.org/x/net/internal/socket/msghdr_zos_s390x.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build s390x && zos -// +build s390x,zos package socket diff --git a/vendor/golang.org/x/net/internal/socket/norace.go b/vendor/golang.org/x/net/internal/socket/norace.go index de0ad420fc529..8af30ecfbb3c7 100644 --- a/vendor/golang.org/x/net/internal/socket/norace.go +++ b/vendor/golang.org/x/net/internal/socket/norace.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !race -// +build !race package socket diff --git a/vendor/golang.org/x/net/internal/socket/race.go b/vendor/golang.org/x/net/internal/socket/race.go index f0a28a625d081..9afa958083a71 100644 --- a/vendor/golang.org/x/net/internal/socket/race.go +++ b/vendor/golang.org/x/net/internal/socket/race.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build race -// +build race package socket diff --git a/vendor/golang.org/x/net/internal/socket/rawconn_mmsg.go b/vendor/golang.org/x/net/internal/socket/rawconn_mmsg.go index 8f79b38f74066..0431390789d4d 100644 --- a/vendor/golang.org/x/net/internal/socket/rawconn_mmsg.go +++ b/vendor/golang.org/x/net/internal/socket/rawconn_mmsg.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux -// +build linux package socket diff --git a/vendor/golang.org/x/net/internal/socket/rawconn_msg.go b/vendor/golang.org/x/net/internal/socket/rawconn_msg.go index f7d0b0d2b853d..7c0d7410bccb0 100644 --- a/vendor/golang.org/x/net/internal/socket/rawconn_msg.go +++ b/vendor/golang.org/x/net/internal/socket/rawconn_msg.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || windows || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows zos package socket diff --git a/vendor/golang.org/x/net/internal/socket/rawconn_nommsg.go b/vendor/golang.org/x/net/internal/socket/rawconn_nommsg.go index 02f32855660a1..e363fb5a891c0 100644 --- a/vendor/golang.org/x/net/internal/socket/rawconn_nommsg.go +++ b/vendor/golang.org/x/net/internal/socket/rawconn_nommsg.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !linux -// +build !linux package socket diff --git a/vendor/golang.org/x/net/internal/socket/rawconn_nomsg.go b/vendor/golang.org/x/net/internal/socket/rawconn_nomsg.go index dd785877b66b1..ff7a8baf0b338 100644 --- a/vendor/golang.org/x/net/internal/socket/rawconn_nomsg.go +++ b/vendor/golang.org/x/net/internal/socket/rawconn_nomsg.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos -// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos package socket diff --git a/vendor/golang.org/x/net/internal/socket/sys_bsd.go b/vendor/golang.org/x/net/internal/socket/sys_bsd.go index b258879d446e3..e7664d48bec19 100644 --- a/vendor/golang.org/x/net/internal/socket/sys_bsd.go +++ b/vendor/golang.org/x/net/internal/socket/sys_bsd.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || openbsd || solaris -// +build aix darwin dragonfly freebsd openbsd solaris package socket diff --git a/vendor/golang.org/x/net/internal/socket/sys_const_unix.go b/vendor/golang.org/x/net/internal/socket/sys_const_unix.go index 5d99f2373f205..d7627f87eb80e 100644 --- a/vendor/golang.org/x/net/internal/socket/sys_const_unix.go +++ b/vendor/golang.org/x/net/internal/socket/sys_const_unix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package socket diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux.go b/vendor/golang.org/x/net/internal/socket/sys_linux.go index 76f5b8ae5d5de..08d4910778c5e 100644 --- a/vendor/golang.org/x/net/internal/socket/sys_linux.go +++ b/vendor/golang.org/x/net/internal/socket/sys_linux.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && !s390x && !386 -// +build linux,!s390x,!386 package socket diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_loong64.go b/vendor/golang.org/x/net/internal/socket/sys_linux_loong64.go index af964e61713a5..1d182470d0272 100644 --- a/vendor/golang.org/x/net/internal/socket/sys_linux_loong64.go +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_loong64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build loong64 -// +build loong64 package socket diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_riscv64.go b/vendor/golang.org/x/net/internal/socket/sys_linux_riscv64.go index 5b128fbb2a2c6..0e407d12571d5 100644 --- a/vendor/golang.org/x/net/internal/socket/sys_linux_riscv64.go +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_riscv64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build riscv64 -// +build riscv64 package socket diff --git a/vendor/golang.org/x/net/internal/socket/sys_posix.go b/vendor/golang.org/x/net/internal/socket/sys_posix.go index 42b8f2340e365..58d8654824405 100644 --- a/vendor/golang.org/x/net/internal/socket/sys_posix.go +++ b/vendor/golang.org/x/net/internal/socket/sys_posix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || windows || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows zos package socket diff --git a/vendor/golang.org/x/net/internal/socket/sys_stub.go b/vendor/golang.org/x/net/internal/socket/sys_stub.go index 7cfb349c0cd53..2e5b473c66079 100644 --- a/vendor/golang.org/x/net/internal/socket/sys_stub.go +++ b/vendor/golang.org/x/net/internal/socket/sys_stub.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos -// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos package socket diff --git a/vendor/golang.org/x/net/internal/socket/sys_unix.go b/vendor/golang.org/x/net/internal/socket/sys_unix.go index de823932b9a71..93058db5b99ac 100644 --- a/vendor/golang.org/x/net/internal/socket/sys_unix.go +++ b/vendor/golang.org/x/net/internal/socket/sys_unix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package socket diff --git a/vendor/golang.org/x/net/internal/socket/zsys_aix_ppc64.go b/vendor/golang.org/x/net/internal/socket/zsys_aix_ppc64.go index 00691bd524453..45bab004c1400 100644 --- a/vendor/golang.org/x/net/internal/socket/zsys_aix_ppc64.go +++ b/vendor/golang.org/x/net/internal/socket/zsys_aix_ppc64.go @@ -3,7 +3,6 @@ // Added for go1.11 compatibility //go:build aix -// +build aix package socket diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_loong64.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_loong64.go index 6a94fec2c5428..b6fc15a1a24b7 100644 --- a/vendor/golang.org/x/net/internal/socket/zsys_linux_loong64.go +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_loong64.go @@ -2,7 +2,6 @@ // cgo -godefs defs_linux.go //go:build loong64 -// +build loong64 package socket diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_riscv64.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_riscv64.go index c066272ddd11f..e67fc3cbaaa52 100644 --- a/vendor/golang.org/x/net/internal/socket/zsys_linux_riscv64.go +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_riscv64.go @@ -2,7 +2,6 @@ // cgo -godefs defs_linux.go //go:build riscv64 -// +build riscv64 package socket diff --git a/vendor/golang.org/x/net/ipv4/control_bsd.go b/vendor/golang.org/x/net/ipv4/control_bsd.go index b7385dfd95ab4..c88da8cbe742e 100644 --- a/vendor/golang.org/x/net/ipv4/control_bsd.go +++ b/vendor/golang.org/x/net/ipv4/control_bsd.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || netbsd || openbsd -// +build aix darwin dragonfly freebsd netbsd openbsd package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/control_pktinfo.go b/vendor/golang.org/x/net/ipv4/control_pktinfo.go index 0e748dbdc4680..14ae2dae49b11 100644 --- a/vendor/golang.org/x/net/ipv4/control_pktinfo.go +++ b/vendor/golang.org/x/net/ipv4/control_pktinfo.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin || linux || solaris -// +build darwin linux solaris package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/control_stub.go b/vendor/golang.org/x/net/ipv4/control_stub.go index f27322c3ed47b..3ba66116094db 100644 --- a/vendor/golang.org/x/net/ipv4/control_stub.go +++ b/vendor/golang.org/x/net/ipv4/control_stub.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos -// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/control_unix.go b/vendor/golang.org/x/net/ipv4/control_unix.go index 2413e02f8f2d6..2e765548f3abd 100644 --- a/vendor/golang.org/x/net/ipv4/control_unix.go +++ b/vendor/golang.org/x/net/ipv4/control_unix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/icmp_stub.go b/vendor/golang.org/x/net/ipv4/icmp_stub.go index cd4ee6e1c9208..c2c4ce7ff546f 100644 --- a/vendor/golang.org/x/net/ipv4/icmp_stub.go +++ b/vendor/golang.org/x/net/ipv4/icmp_stub.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !linux -// +build !linux package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/payload_cmsg.go b/vendor/golang.org/x/net/ipv4/payload_cmsg.go index 1bb370e25fd7d..91c685e8fca17 100644 --- a/vendor/golang.org/x/net/ipv4/payload_cmsg.go +++ b/vendor/golang.org/x/net/ipv4/payload_cmsg.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/payload_nocmsg.go b/vendor/golang.org/x/net/ipv4/payload_nocmsg.go index 53f0794eb7643..2afd4b50ef3dc 100644 --- a/vendor/golang.org/x/net/ipv4/payload_nocmsg.go +++ b/vendor/golang.org/x/net/ipv4/payload_nocmsg.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !zos -// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!zos package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/sockopt_posix.go b/vendor/golang.org/x/net/ipv4/sockopt_posix.go index eb07c1c02a5cc..82e2c378382db 100644 --- a/vendor/golang.org/x/net/ipv4/sockopt_posix.go +++ b/vendor/golang.org/x/net/ipv4/sockopt_posix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || windows || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows zos package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/sockopt_stub.go b/vendor/golang.org/x/net/ipv4/sockopt_stub.go index cf036893b7d1c..840108bf76e3c 100644 --- a/vendor/golang.org/x/net/ipv4/sockopt_stub.go +++ b/vendor/golang.org/x/net/ipv4/sockopt_stub.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos -// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/sys_aix.go b/vendor/golang.org/x/net/ipv4/sys_aix.go index 02730cdfd270c..9244a68a38be3 100644 --- a/vendor/golang.org/x/net/ipv4/sys_aix.go +++ b/vendor/golang.org/x/net/ipv4/sys_aix.go @@ -4,7 +4,6 @@ // Added for go1.11 compatibility //go:build aix -// +build aix package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/sys_asmreq.go b/vendor/golang.org/x/net/ipv4/sys_asmreq.go index 22322b387ec9d..645f254c6d2d9 100644 --- a/vendor/golang.org/x/net/ipv4/sys_asmreq.go +++ b/vendor/golang.org/x/net/ipv4/sys_asmreq.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || netbsd || openbsd || solaris || windows -// +build aix darwin dragonfly freebsd netbsd openbsd solaris windows package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/sys_asmreq_stub.go b/vendor/golang.org/x/net/ipv4/sys_asmreq_stub.go index fde640142df59..48cfb6db2f535 100644 --- a/vendor/golang.org/x/net/ipv4/sys_asmreq_stub.go +++ b/vendor/golang.org/x/net/ipv4/sys_asmreq_stub.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !aix && !darwin && !dragonfly && !freebsd && !netbsd && !openbsd && !solaris && !windows -// +build !aix,!darwin,!dragonfly,!freebsd,!netbsd,!openbsd,!solaris,!windows package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/sys_asmreqn.go b/vendor/golang.org/x/net/ipv4/sys_asmreqn.go index 54eb9901b5fcc..0b27b632f1819 100644 --- a/vendor/golang.org/x/net/ipv4/sys_asmreqn.go +++ b/vendor/golang.org/x/net/ipv4/sys_asmreqn.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin || freebsd || linux -// +build darwin freebsd linux package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/sys_asmreqn_stub.go b/vendor/golang.org/x/net/ipv4/sys_asmreqn_stub.go index dcb15f25a55c8..303a5e2e68745 100644 --- a/vendor/golang.org/x/net/ipv4/sys_asmreqn_stub.go +++ b/vendor/golang.org/x/net/ipv4/sys_asmreqn_stub.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !darwin && !freebsd && !linux -// +build !darwin,!freebsd,!linux package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/sys_bpf.go b/vendor/golang.org/x/net/ipv4/sys_bpf.go index fb11e324e2ca7..1b4780df413db 100644 --- a/vendor/golang.org/x/net/ipv4/sys_bpf.go +++ b/vendor/golang.org/x/net/ipv4/sys_bpf.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux -// +build linux package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/sys_bpf_stub.go b/vendor/golang.org/x/net/ipv4/sys_bpf_stub.go index fc53a0d33ae97..b1f779b493777 100644 --- a/vendor/golang.org/x/net/ipv4/sys_bpf_stub.go +++ b/vendor/golang.org/x/net/ipv4/sys_bpf_stub.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !linux -// +build !linux package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/sys_bsd.go b/vendor/golang.org/x/net/ipv4/sys_bsd.go index e191b2f14f9dc..b7b032d260193 100644 --- a/vendor/golang.org/x/net/ipv4/sys_bsd.go +++ b/vendor/golang.org/x/net/ipv4/sys_bsd.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build netbsd || openbsd -// +build netbsd openbsd package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/sys_ssmreq.go b/vendor/golang.org/x/net/ipv4/sys_ssmreq.go index 6a4e7abf9bdff..a295e15ea00c4 100644 --- a/vendor/golang.org/x/net/ipv4/sys_ssmreq.go +++ b/vendor/golang.org/x/net/ipv4/sys_ssmreq.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin || freebsd || linux || solaris -// +build darwin freebsd linux solaris package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/sys_ssmreq_stub.go b/vendor/golang.org/x/net/ipv4/sys_ssmreq_stub.go index 157159fd507bd..74bd454e256ca 100644 --- a/vendor/golang.org/x/net/ipv4/sys_ssmreq_stub.go +++ b/vendor/golang.org/x/net/ipv4/sys_ssmreq_stub.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !darwin && !freebsd && !linux && !solaris -// +build !darwin,!freebsd,!linux,!solaris package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/sys_stub.go b/vendor/golang.org/x/net/ipv4/sys_stub.go index d550851658523..20af4074c2fa9 100644 --- a/vendor/golang.org/x/net/ipv4/sys_stub.go +++ b/vendor/golang.org/x/net/ipv4/sys_stub.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos -// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/zsys_aix_ppc64.go b/vendor/golang.org/x/net/ipv4/zsys_aix_ppc64.go index b7f2d6e5c18e0..dd454025c74c6 100644 --- a/vendor/golang.org/x/net/ipv4/zsys_aix_ppc64.go +++ b/vendor/golang.org/x/net/ipv4/zsys_aix_ppc64.go @@ -3,7 +3,6 @@ // Added for go1.11 compatibility //go:build aix -// +build aix package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_loong64.go b/vendor/golang.org/x/net/ipv4/zsys_linux_loong64.go index e15c22c748774..54f9e13948adf 100644 --- a/vendor/golang.org/x/net/ipv4/zsys_linux_loong64.go +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_loong64.go @@ -2,7 +2,6 @@ // cgo -godefs defs_linux.go //go:build loong64 -// +build loong64 package ipv4 diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_riscv64.go b/vendor/golang.org/x/net/ipv4/zsys_linux_riscv64.go index e2edebdb81245..78374a5250fdc 100644 --- a/vendor/golang.org/x/net/ipv4/zsys_linux_riscv64.go +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_riscv64.go @@ -2,7 +2,6 @@ // cgo -godefs defs_linux.go //go:build riscv64 -// +build riscv64 package ipv4 diff --git a/vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go b/vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go index 2733ddbe272d9..a8f04e7b3b81e 100644 --- a/vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go +++ b/vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin -// +build darwin package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go b/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go index 9c90844aac1a7..51fbbb1f17056 100644 --- a/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go +++ b/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/control_stub.go b/vendor/golang.org/x/net/ipv6/control_stub.go index b7e8643fc9c28..eb28ce753459e 100644 --- a/vendor/golang.org/x/net/ipv6/control_stub.go +++ b/vendor/golang.org/x/net/ipv6/control_stub.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos -// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/control_unix.go b/vendor/golang.org/x/net/ipv6/control_unix.go index 63e475db83186..9c73b8647eb56 100644 --- a/vendor/golang.org/x/net/ipv6/control_unix.go +++ b/vendor/golang.org/x/net/ipv6/control_unix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/icmp_bsd.go b/vendor/golang.org/x/net/ipv6/icmp_bsd.go index 120bf8775839e..2814534a0b230 100644 --- a/vendor/golang.org/x/net/ipv6/icmp_bsd.go +++ b/vendor/golang.org/x/net/ipv6/icmp_bsd.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || netbsd || openbsd -// +build aix darwin dragonfly freebsd netbsd openbsd package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/icmp_stub.go b/vendor/golang.org/x/net/ipv6/icmp_stub.go index d60136a9016eb..c92c9b51e1eb2 100644 --- a/vendor/golang.org/x/net/ipv6/icmp_stub.go +++ b/vendor/golang.org/x/net/ipv6/icmp_stub.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos -// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/payload_cmsg.go b/vendor/golang.org/x/net/ipv6/payload_cmsg.go index b0692e4304f50..be04e4d6ae304 100644 --- a/vendor/golang.org/x/net/ipv6/payload_cmsg.go +++ b/vendor/golang.org/x/net/ipv6/payload_cmsg.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/payload_nocmsg.go b/vendor/golang.org/x/net/ipv6/payload_nocmsg.go index cd0ff508388d5..29b9ccf691a49 100644 --- a/vendor/golang.org/x/net/ipv6/payload_nocmsg.go +++ b/vendor/golang.org/x/net/ipv6/payload_nocmsg.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !zos -// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!zos package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/sockopt_posix.go b/vendor/golang.org/x/net/ipv6/sockopt_posix.go index 37c6287130ffb..34dfed588ecd2 100644 --- a/vendor/golang.org/x/net/ipv6/sockopt_posix.go +++ b/vendor/golang.org/x/net/ipv6/sockopt_posix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || windows || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows zos package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/sockopt_stub.go b/vendor/golang.org/x/net/ipv6/sockopt_stub.go index 32fd8664ceb88..a09c3aaf26cf1 100644 --- a/vendor/golang.org/x/net/ipv6/sockopt_stub.go +++ b/vendor/golang.org/x/net/ipv6/sockopt_stub.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos -// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/sys_aix.go b/vendor/golang.org/x/net/ipv6/sys_aix.go index a47182afb9d6d..93c8efc46879a 100644 --- a/vendor/golang.org/x/net/ipv6/sys_aix.go +++ b/vendor/golang.org/x/net/ipv6/sys_aix.go @@ -4,7 +4,6 @@ // Added for go1.11 compatibility //go:build aix -// +build aix package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/sys_asmreq.go b/vendor/golang.org/x/net/ipv6/sys_asmreq.go index 6ff9950d13586..5c9cb444713d4 100644 --- a/vendor/golang.org/x/net/ipv6/sys_asmreq.go +++ b/vendor/golang.org/x/net/ipv6/sys_asmreq.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || windows -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/sys_asmreq_stub.go b/vendor/golang.org/x/net/ipv6/sys_asmreq_stub.go index 485290cb824b4..dc70494680f0b 100644 --- a/vendor/golang.org/x/net/ipv6/sys_asmreq_stub.go +++ b/vendor/golang.org/x/net/ipv6/sys_asmreq_stub.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows -// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/sys_bpf.go b/vendor/golang.org/x/net/ipv6/sys_bpf.go index b5661fb8f066b..e39f75f49fa55 100644 --- a/vendor/golang.org/x/net/ipv6/sys_bpf.go +++ b/vendor/golang.org/x/net/ipv6/sys_bpf.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux -// +build linux package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/sys_bpf_stub.go b/vendor/golang.org/x/net/ipv6/sys_bpf_stub.go index cb006618720ac..8532a8f5de748 100644 --- a/vendor/golang.org/x/net/ipv6/sys_bpf_stub.go +++ b/vendor/golang.org/x/net/ipv6/sys_bpf_stub.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !linux -// +build !linux package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/sys_bsd.go b/vendor/golang.org/x/net/ipv6/sys_bsd.go index bde41a6cef91f..9f3bc2afde2cc 100644 --- a/vendor/golang.org/x/net/ipv6/sys_bsd.go +++ b/vendor/golang.org/x/net/ipv6/sys_bsd.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build dragonfly || netbsd || openbsd -// +build dragonfly netbsd openbsd package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/sys_ssmreq.go b/vendor/golang.org/x/net/ipv6/sys_ssmreq.go index 023488a49cde0..b40f5c685bdf1 100644 --- a/vendor/golang.org/x/net/ipv6/sys_ssmreq.go +++ b/vendor/golang.org/x/net/ipv6/sys_ssmreq.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build aix || darwin || freebsd || linux || solaris || zos -// +build aix darwin freebsd linux solaris zos package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go b/vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go index acdf2e5cf7c42..6526aad581201 100644 --- a/vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go +++ b/vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !aix && !darwin && !freebsd && !linux && !solaris && !zos -// +build !aix,!darwin,!freebsd,!linux,!solaris,!zos package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/sys_stub.go b/vendor/golang.org/x/net/ipv6/sys_stub.go index 5807bba39266c..76602c34e6f8e 100644 --- a/vendor/golang.org/x/net/ipv6/sys_stub.go +++ b/vendor/golang.org/x/net/ipv6/sys_stub.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos -// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/zsys_aix_ppc64.go b/vendor/golang.org/x/net/ipv6/zsys_aix_ppc64.go index f604b0f3b4080..668716df4df5c 100644 --- a/vendor/golang.org/x/net/ipv6/zsys_aix_ppc64.go +++ b/vendor/golang.org/x/net/ipv6/zsys_aix_ppc64.go @@ -3,7 +3,6 @@ // Added for go1.11 compatibility //go:build aix -// +build aix package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_loong64.go b/vendor/golang.org/x/net/ipv6/zsys_linux_loong64.go index 598fbfa06f772..6a53284dbe5fb 100644 --- a/vendor/golang.org/x/net/ipv6/zsys_linux_loong64.go +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_loong64.go @@ -2,7 +2,6 @@ // cgo -godefs defs_linux.go //go:build loong64 -// +build loong64 package ipv6 diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_riscv64.go b/vendor/golang.org/x/net/ipv6/zsys_linux_riscv64.go index d4f78e405ab08..13b3472057af1 100644 --- a/vendor/golang.org/x/net/ipv6/zsys_linux_riscv64.go +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_riscv64.go @@ -2,7 +2,6 @@ // cgo -godefs defs_linux.go //go:build riscv64 -// +build riscv64 package ipv6 diff --git a/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go b/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go index 7a0b9ed1029e2..2459d069f7341 100644 --- a/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go +++ b/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go @@ -47,6 +47,10 @@ type Config struct { // client ID & client secret sent. The zero value means to // auto-detect. AuthStyle oauth2.AuthStyle + + // authStyleCache caches which auth style to use when Endpoint.AuthStyle is + // the zero value (AuthStyleAutoDetect). + authStyleCache internal.LazyAuthStyleCache } // Token uses client credentials to retrieve a token. 
@@ -103,7 +107,7 @@ func (c *tokenSource) Token() (*oauth2.Token, error) { v[k] = p } - tk, err := internal.RetrieveToken(c.ctx, c.conf.ClientID, c.conf.ClientSecret, c.conf.TokenURL, v, internal.AuthStyle(c.conf.AuthStyle)) + tk, err := internal.RetrieveToken(c.ctx, c.conf.ClientID, c.conf.ClientSecret, c.conf.TokenURL, v, internal.AuthStyle(c.conf.AuthStyle), c.conf.authStyleCache.Get()) if err != nil { if rErr, ok := err.(*internal.RetrieveError); ok { return nil, (*oauth2.RetrieveError)(rErr) diff --git a/vendor/golang.org/x/oauth2/deviceauth.go b/vendor/golang.org/x/oauth2/deviceauth.go new file mode 100644 index 0000000000000..e99c92f39c784 --- /dev/null +++ b/vendor/golang.org/x/oauth2/deviceauth.go @@ -0,0 +1,198 @@ +package oauth2 + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strings" + "time" + + "golang.org/x/oauth2/internal" +) + +// https://datatracker.ietf.org/doc/html/rfc8628#section-3.5 +const ( + errAuthorizationPending = "authorization_pending" + errSlowDown = "slow_down" + errAccessDenied = "access_denied" + errExpiredToken = "expired_token" +) + +// DeviceAuthResponse describes a successful RFC 8628 Device Authorization Response +// https://datatracker.ietf.org/doc/html/rfc8628#section-3.2 +type DeviceAuthResponse struct { + // DeviceCode + DeviceCode string `json:"device_code"` + // UserCode is the code the user should enter at the verification uri + UserCode string `json:"user_code"` + // VerificationURI is where user should enter the user code + VerificationURI string `json:"verification_uri"` + // VerificationURIComplete (if populated) includes the user code in the verification URI. This is typically shown to the user in non-textual form, such as a QR code. + VerificationURIComplete string `json:"verification_uri_complete,omitempty"` + // Expiry is when the device code and user code expire + Expiry time.Time `json:"expires_in,omitempty"` + // Interval is the duration in seconds that Poll should wait between requests + Interval int64 `json:"interval,omitempty"` +} + +func (d DeviceAuthResponse) MarshalJSON() ([]byte, error) { + type Alias DeviceAuthResponse + var expiresIn int64 + if !d.Expiry.IsZero() { + expiresIn = int64(time.Until(d.Expiry).Seconds()) + } + return json.Marshal(&struct { + ExpiresIn int64 `json:"expires_in,omitempty"` + *Alias + }{ + ExpiresIn: expiresIn, + Alias: (*Alias)(&d), + }) + +} + +func (c *DeviceAuthResponse) UnmarshalJSON(data []byte) error { + type Alias DeviceAuthResponse + aux := &struct { + ExpiresIn int64 `json:"expires_in"` + // workaround misspelling of verification_uri + VerificationURL string `json:"verification_url"` + *Alias + }{ + Alias: (*Alias)(c), + } + if err := json.Unmarshal(data, &aux); err != nil { + return err + } + if aux.ExpiresIn != 0 { + c.Expiry = time.Now().UTC().Add(time.Second * time.Duration(aux.ExpiresIn)) + } + if c.VerificationURI == "" { + c.VerificationURI = aux.VerificationURL + } + return nil +} + +// DeviceAuth returns a device auth struct which contains a device code +// and authorization information provided for users to enter on another device. 
+func (c *Config) DeviceAuth(ctx context.Context, opts ...AuthCodeOption) (*DeviceAuthResponse, error) { + // https://datatracker.ietf.org/doc/html/rfc8628#section-3.1 + v := url.Values{ + "client_id": {c.ClientID}, + } + if len(c.Scopes) > 0 { + v.Set("scope", strings.Join(c.Scopes, " ")) + } + for _, opt := range opts { + opt.setValue(v) + } + return retrieveDeviceAuth(ctx, c, v) +} + +func retrieveDeviceAuth(ctx context.Context, c *Config, v url.Values) (*DeviceAuthResponse, error) { + if c.Endpoint.DeviceAuthURL == "" { + return nil, errors.New("endpoint missing DeviceAuthURL") + } + + req, err := http.NewRequest("POST", c.Endpoint.DeviceAuthURL, strings.NewReader(v.Encode())) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + req.Header.Set("Accept", "application/json") + + t := time.Now() + r, err := internal.ContextClient(ctx).Do(req) + if err != nil { + return nil, err + } + + body, err := io.ReadAll(io.LimitReader(r.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot auth device: %v", err) + } + if code := r.StatusCode; code < 200 || code > 299 { + return nil, &RetrieveError{ + Response: r, + Body: body, + } + } + + da := &DeviceAuthResponse{} + err = json.Unmarshal(body, &da) + if err != nil { + return nil, fmt.Errorf("unmarshal %s", err) + } + + if !da.Expiry.IsZero() { + // Make a small adjustment to account for time taken by the request + da.Expiry = da.Expiry.Add(-time.Since(t)) + } + + return da, nil +} + +// DeviceAccessToken polls the server to exchange a device code for a token. +func (c *Config) DeviceAccessToken(ctx context.Context, da *DeviceAuthResponse, opts ...AuthCodeOption) (*Token, error) { + if !da.Expiry.IsZero() { + var cancel context.CancelFunc + ctx, cancel = context.WithDeadline(ctx, da.Expiry) + defer cancel() + } + + // https://datatracker.ietf.org/doc/html/rfc8628#section-3.4 + v := url.Values{ + "client_id": {c.ClientID}, + "grant_type": {"urn:ietf:params:oauth:grant-type:device_code"}, + "device_code": {da.DeviceCode}, + } + if len(c.Scopes) > 0 { + v.Set("scope", strings.Join(c.Scopes, " ")) + } + for _, opt := range opts { + opt.setValue(v) + } + + // "If no value is provided, clients MUST use 5 as the default." + // https://datatracker.ietf.org/doc/html/rfc8628#section-3.2 + interval := da.Interval + if interval == 0 { + interval = 5 + } + + ticker := time.NewTicker(time.Duration(interval) * time.Second) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-ticker.C: + tok, err := retrieveToken(ctx, c, v) + if err == nil { + return tok, nil + } + + e, ok := err.(*RetrieveError) + if !ok { + return nil, err + } + switch e.ErrorCode { + case errSlowDown: + // https://datatracker.ietf.org/doc/html/rfc8628#section-3.5 + // "the interval MUST be increased by 5 seconds for this and all subsequent requests" + interval += 5 + ticker.Reset(time.Duration(interval) * time.Second) + case errAuthorizationPending: + // Do nothing. 
+ case errAccessDenied, errExpiredToken: + fallthrough + default: + return tok, err + } + } + } +} diff --git a/vendor/golang.org/x/oauth2/google/default.go b/vendor/golang.org/x/oauth2/google/default.go index 2cf71f0f93f8c..12b12a30c5562 100644 --- a/vendor/golang.org/x/oauth2/google/default.go +++ b/vendor/golang.org/x/oauth2/google/default.go @@ -19,7 +19,10 @@ import ( "golang.org/x/oauth2/authhandler" ) -const adcSetupURL = "https://cloud.google.com/docs/authentication/external/set-up-adc" +const ( + adcSetupURL = "https://cloud.google.com/docs/authentication/external/set-up-adc" + universeDomainDefault = "googleapis.com" +) // Credentials holds Google credentials, including "Application Default Credentials". // For more details, see: @@ -37,6 +40,18 @@ type Credentials struct { // environment and not with a credentials file, e.g. when code is // running on Google Cloud Platform. JSON []byte + + // universeDomain is the default service domain for a given Cloud universe. + universeDomain string +} + +// UniverseDomain returns the default service domain for a given Cloud universe. +// The default value is "googleapis.com". +func (c *Credentials) UniverseDomain() string { + if c.universeDomain == "" { + return universeDomainDefault + } + return c.universeDomain } // DefaultCredentials is the old name of Credentials. @@ -200,15 +215,23 @@ func CredentialsFromJSONWithParams(ctx context.Context, jsonData []byte, params if err := json.Unmarshal(jsonData, &f); err != nil { return nil, err } + + universeDomain := f.UniverseDomain + // Authorized user credentials are only supported in the googleapis.com universe. + if f.Type == userCredentialsKey { + universeDomain = universeDomainDefault + } + ts, err := f.tokenSource(ctx, params) if err != nil { return nil, err } ts = newErrWrappingTokenSource(ts) return &Credentials{ - ProjectID: f.ProjectID, - TokenSource: ts, - JSON: jsonData, + ProjectID: f.ProjectID, + TokenSource: ts, + JSON: jsonData, + universeDomain: universeDomain, }, nil } diff --git a/vendor/golang.org/x/oauth2/google/doc.go b/vendor/golang.org/x/oauth2/google/doc.go index ca717634a3f9b..03c42c6f87970 100644 --- a/vendor/golang.org/x/oauth2/google/doc.go +++ b/vendor/golang.org/x/oauth2/google/doc.go @@ -101,6 +101,8 @@ // executable-sourced credentials), please check out: // https://cloud.google.com/iam/docs/workforce-obtaining-short-lived-credentials#generate_a_configuration_file_for_non-interactive_sign-in // +// # Security considerations +// // Note that this library does not perform any validation on the token_url, token_info_url, // or service_account_impersonation_url fields of the credential configuration. // It is not recommended to use a credential configuration that you did not generate with diff --git a/vendor/golang.org/x/oauth2/google/google.go b/vendor/golang.org/x/oauth2/google/google.go index cc1223889e2e4..c66c53527db42 100644 --- a/vendor/golang.org/x/oauth2/google/google.go +++ b/vendor/golang.org/x/oauth2/google/google.go @@ -16,14 +16,16 @@ import ( "cloud.google.com/go/compute/metadata" "golang.org/x/oauth2" "golang.org/x/oauth2/google/internal/externalaccount" + "golang.org/x/oauth2/google/internal/externalaccountauthorizeduser" "golang.org/x/oauth2/jwt" ) // Endpoint is Google's OAuth 2.0 default endpoint. 
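+// DeviceAuthURL is used by oauth2.Config.DeviceAuth to start the RFC 8628
+// device authorization flow.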
var Endpoint = oauth2.Endpoint{ - AuthURL: "https://accounts.google.com/o/oauth2/auth", - TokenURL: "https://oauth2.googleapis.com/token", - AuthStyle: oauth2.AuthStyleInParams, + AuthURL: "https://accounts.google.com/o/oauth2/auth", + TokenURL: "https://oauth2.googleapis.com/token", + DeviceAuthURL: "https://oauth2.googleapis.com/device/code", + AuthStyle: oauth2.AuthStyleInParams, } // MTLSTokenURL is Google's OAuth 2.0 default mTLS endpoint. @@ -95,10 +97,11 @@ func JWTConfigFromJSON(jsonKey []byte, scope ...string) (*jwt.Config, error) { // JSON key file types. const ( - serviceAccountKey = "service_account" - userCredentialsKey = "authorized_user" - externalAccountKey = "external_account" - impersonatedServiceAccount = "impersonated_service_account" + serviceAccountKey = "service_account" + userCredentialsKey = "authorized_user" + externalAccountKey = "external_account" + externalAccountAuthorizedUserKey = "external_account_authorized_user" + impersonatedServiceAccount = "impersonated_service_account" ) // credentialsFile is the unmarshalled representation of a credentials file. @@ -106,12 +109,13 @@ type credentialsFile struct { Type string `json:"type"` // Service Account fields - ClientEmail string `json:"client_email"` - PrivateKeyID string `json:"private_key_id"` - PrivateKey string `json:"private_key"` - AuthURL string `json:"auth_uri"` - TokenURL string `json:"token_uri"` - ProjectID string `json:"project_id"` + ClientEmail string `json:"client_email"` + PrivateKeyID string `json:"private_key_id"` + PrivateKey string `json:"private_key"` + AuthURL string `json:"auth_uri"` + TokenURL string `json:"token_uri"` + ProjectID string `json:"project_id"` + UniverseDomain string `json:"universe_domain"` // User Credential fields // (These typically come from gcloud auth.) 
@@ -131,6 +135,9 @@ type credentialsFile struct { QuotaProjectID string `json:"quota_project_id"` WorkforcePoolUserProject string `json:"workforce_pool_user_project"` + // External Account Authorized User fields + RevokeURL string `json:"revoke_url"` + // Service account impersonation SourceCredentials *credentialsFile `json:"source_credentials"` } @@ -199,6 +206,19 @@ func (f *credentialsFile) tokenSource(ctx context.Context, params CredentialsPar WorkforcePoolUserProject: f.WorkforcePoolUserProject, } return cfg.TokenSource(ctx) + case externalAccountAuthorizedUserKey: + cfg := &externalaccountauthorizeduser.Config{ + Audience: f.Audience, + RefreshToken: f.RefreshToken, + TokenURL: f.TokenURLExternal, + TokenInfoURL: f.TokenInfoURL, + ClientID: f.ClientID, + ClientSecret: f.ClientSecret, + RevokeURL: f.RevokeURL, + QuotaProjectID: f.QuotaProjectID, + Scopes: params.Scopes, + } + return cfg.TokenSource(ctx) case impersonatedServiceAccount: if f.ServiceAccountImpersonationURL == "" || f.SourceCredentials == nil { return nil, errors.New("missing 'source_credentials' field or 'service_account_impersonation_url' in credentials") diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go index 2bf3202b290f1..bd4efd19baa2a 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go @@ -274,49 +274,6 @@ type awsRequest struct { Headers []awsRequestHeader `json:"headers"` } -func (cs awsCredentialSource) validateMetadataServers() error { - if err := cs.validateMetadataServer(cs.RegionURL, "region_url"); err != nil { - return err - } - if err := cs.validateMetadataServer(cs.CredVerificationURL, "url"); err != nil { - return err - } - return cs.validateMetadataServer(cs.IMDSv2SessionTokenURL, "imdsv2_session_token_url") -} - -var validHostnames []string = []string{"169.254.169.254", "fd00:ec2::254"} - -func (cs awsCredentialSource) isValidMetadataServer(metadataUrl string) bool { - if metadataUrl == "" { - // Zero value means use default, which is valid. 
- return true - } - - u, err := url.Parse(metadataUrl) - if err != nil { - // Unparseable URL means invalid - return false - } - - for _, validHostname := range validHostnames { - if u.Hostname() == validHostname { - // If it's one of the valid hostnames, everything is good - return true - } - } - - // hostname not found in our allowlist, so not valid - return false -} - -func (cs awsCredentialSource) validateMetadataServer(metadataUrl, urlName string) error { - if !cs.isValidMetadataServer(metadataUrl) { - return fmt.Errorf("oauth2/google: invalid hostname %s for %s", metadataUrl, urlName) - } - - return nil -} - func (cs awsCredentialSource) doRequest(req *http.Request) (*http.Response, error) { if cs.client == nil { cs.client = oauth2.NewClient(cs.ctx, nil) @@ -339,6 +296,10 @@ func shouldUseMetadataServer() bool { return !canRetrieveRegionFromEnvironment() || !canRetrieveSecurityCredentialFromEnvironment() } +func (cs awsCredentialSource) credentialSourceType() string { + return "aws" +} + func (cs awsCredentialSource) subjectToken() (string, error) { if cs.requestSigner == nil { headers := make(map[string]string) diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go index dcd252a61ccfe..33288d3677341 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go @@ -8,13 +8,12 @@ import ( "context" "fmt" "net/http" - "net/url" "regexp" "strconv" - "strings" "time" "golang.org/x/oauth2" + "golang.org/x/oauth2/google/internal/stsexchange" ) // now aliases time.Now for testing @@ -63,31 +62,10 @@ type Config struct { WorkforcePoolUserProject string } -// Each element consists of a list of patterns. validateURLs checks for matches -// that include all elements in a given list, in that order. 
- var ( validWorkforceAudiencePattern *regexp.Regexp = regexp.MustCompile(`//iam\.googleapis\.com/locations/[^/]+/workforcePools/`) ) -func validateURL(input string, patterns []*regexp.Regexp, scheme string) bool { - parsed, err := url.Parse(input) - if err != nil { - return false - } - if !strings.EqualFold(parsed.Scheme, scheme) { - return false - } - toTest := parsed.Host - - for _, pattern := range patterns { - if pattern.MatchString(toTest) { - return true - } - } - return false -} - func validateWorkforceAudience(input string) bool { return validWorkforceAudiencePattern.MatchString(input) } @@ -185,10 +163,6 @@ func (c *Config) parse(ctx context.Context) (baseCredentialSource, error) { awsCredSource.IMDSv2SessionTokenURL = c.CredentialSource.IMDSv2SessionTokenURL } - if err := awsCredSource.validateMetadataServers(); err != nil { - return nil, err - } - return awsCredSource, nil } } else if c.CredentialSource.File != "" { @@ -202,6 +176,7 @@ func (c *Config) parse(ctx context.Context) (baseCredentialSource, error) { } type baseCredentialSource interface { + credentialSourceType() string subjectToken() (string, error) } @@ -211,6 +186,15 @@ type tokenSource struct { conf *Config } +func getMetricsHeaderValue(conf *Config, credSource baseCredentialSource) string { + return fmt.Sprintf("gl-go/%s auth/%s google-byoid-sdk source/%s sa-impersonation/%t config-lifetime/%t", + goVersion(), + "unknown", + credSource.credentialSourceType(), + conf.ServiceAccountImpersonationURL != "", + conf.ServiceAccountImpersonationLifetimeSeconds != 0) +} + // Token allows tokenSource to conform to the oauth2.TokenSource interface. func (ts tokenSource) Token() (*oauth2.Token, error) { conf := ts.conf @@ -224,7 +208,7 @@ func (ts tokenSource) Token() (*oauth2.Token, error) { if err != nil { return nil, err } - stsRequest := stsTokenExchangeRequest{ + stsRequest := stsexchange.TokenExchangeRequest{ GrantType: "urn:ietf:params:oauth:grant-type:token-exchange", Audience: conf.Audience, Scope: conf.Scopes, @@ -234,7 +218,8 @@ func (ts tokenSource) Token() (*oauth2.Token, error) { } header := make(http.Header) header.Add("Content-Type", "application/x-www-form-urlencoded") - clientAuth := clientAuthentication{ + header.Add("x-goog-api-client", getMetricsHeaderValue(conf, credSource)) + clientAuth := stsexchange.ClientAuthentication{ AuthStyle: oauth2.AuthStyleInHeader, ClientID: conf.ClientID, ClientSecret: conf.ClientSecret, @@ -247,7 +232,7 @@ func (ts tokenSource) Token() (*oauth2.Token, error) { "userProject": conf.WorkforcePoolUserProject, } } - stsResp, err := exchangeToken(ts.ctx, conf.TokenURL, &stsRequest, clientAuth, header, options) + stsResp, err := stsexchange.ExchangeToken(ts.ctx, conf.TokenURL, &stsRequest, clientAuth, header, options) if err != nil { return nil, err } diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/executablecredsource.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/executablecredsource.go index 579bcce5f28be..6497dc022efd8 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/executablecredsource.go +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/executablecredsource.go @@ -233,6 +233,10 @@ func (cs executableCredentialSource) parseSubjectTokenFromSource(response []byte return "", tokenTypeError(source) } +func (cs executableCredentialSource) credentialSourceType() string { + return "executable" +} + func (cs executableCredentialSource) subjectToken() (string, error) { if token, err := 
cs.getTokenFromOutputFile(); token != "" || err != nil { return token, err diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/filecredsource.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/filecredsource.go index e953ddb473a7c..f35f73c5cb2f4 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/filecredsource.go +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/filecredsource.go @@ -19,6 +19,10 @@ type fileCredentialSource struct { Format format } +func (cs fileCredentialSource) credentialSourceType() string { + return "file" +} + func (cs fileCredentialSource) subjectToken() (string, error) { tokenFile, err := os.Open(cs.File) if err != nil { diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/header.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/header.go new file mode 100644 index 0000000000000..1d5aad2e2d924 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/header.go @@ -0,0 +1,64 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package externalaccount + +import ( + "runtime" + "strings" + "unicode" +) + +var ( + // version is a package internal global variable for testing purposes. + version = runtime.Version +) + +// versionUnknown is only used when the runtime version cannot be determined. +const versionUnknown = "UNKNOWN" + +// goVersion returns a Go runtime version derived from the runtime environment +// that is modified to be suitable for reporting in a header, meaning it has no +// whitespace. If it is unable to determine the Go runtime version, it returns +// versionUnknown. +func goVersion() string { + const develPrefix = "devel +" + + s := version() + if strings.HasPrefix(s, develPrefix) { + s = s[len(develPrefix):] + if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + return s + } else if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + + notSemverRune := func(r rune) bool { + return !strings.ContainsRune("0123456789.", r) + } + + if strings.HasPrefix(s, "go1") { + s = s[2:] + var prerelease string + if p := strings.IndexFunc(s, notSemverRune); p >= 0 { + s, prerelease = s[:p], s[p:] + } + if strings.HasSuffix(s, ".") { + s += "0" + } else if strings.Count(s, ".") < 2 { + s += ".0" + } + if prerelease != "" { + // Some release candidates already have a dash in them. 
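+			// If not, prepend one so the result reads like a semver prerelease,
+			// e.g. "1.21.0-rc1" rather than "1.21.0rc1".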
+ if !strings.HasPrefix(prerelease, "-") { + prerelease = "-" + prerelease + } + s += prerelease + } + return s + } + return "UNKNOWN" +} diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/urlcredsource.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/urlcredsource.go index 16dca6541d98e..606bb4e80010d 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/urlcredsource.go +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/urlcredsource.go @@ -23,6 +23,10 @@ type urlCredentialSource struct { ctx context.Context } +func (cs urlCredentialSource) credentialSourceType() string { + return "url" +} + func (cs urlCredentialSource) subjectToken() (string, error) { client := oauth2.NewClient(cs.ctx, nil) req, err := http.NewRequest("GET", cs.URL, nil) diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccountauthorizeduser/externalaccountauthorizeduser.go b/vendor/golang.org/x/oauth2/google/internal/externalaccountauthorizeduser/externalaccountauthorizeduser.go new file mode 100644 index 0000000000000..cb58207074667 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccountauthorizeduser/externalaccountauthorizeduser.go @@ -0,0 +1,114 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package externalaccountauthorizeduser + +import ( + "context" + "errors" + "time" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/google/internal/stsexchange" +) + +// now aliases time.Now for testing. +var now = func() time.Time { + return time.Now().UTC() +} + +var tokenValid = func(token oauth2.Token) bool { + return token.Valid() +} + +type Config struct { + // Audience is the Secure Token Service (STS) audience which contains the resource name for the workforce pool and + // the provider identifier in that pool. + Audience string + // RefreshToken is the optional OAuth 2.0 refresh token. If specified, credentials can be refreshed. + RefreshToken string + // TokenURL is the optional STS token exchange endpoint for refresh. Must be specified for refresh, can be left as + // None if the token can not be refreshed. + TokenURL string + // TokenInfoURL is the optional STS endpoint URL for token introspection. + TokenInfoURL string + // ClientID is only required in conjunction with ClientSecret, as described above. + ClientID string + // ClientSecret is currently only required if token_info endpoint also needs to be called with the generated GCP + // access token. When provided, STS will be called with additional basic authentication using client_id as username + // and client_secret as password. + ClientSecret string + // Token is the OAuth2.0 access token. Can be nil if refresh information is provided. + Token string + // Expiry is the optional expiration datetime of the OAuth 2.0 access token. + Expiry time.Time + // RevokeURL is the optional STS endpoint URL for revoking tokens. + RevokeURL string + // QuotaProjectID is the optional project ID used for quota and billing. This project may be different from the + // project used to create the credentials. 
+ QuotaProjectID string + Scopes []string +} + +func (c *Config) canRefresh() bool { + return c.ClientID != "" && c.ClientSecret != "" && c.RefreshToken != "" && c.TokenURL != "" +} + +func (c *Config) TokenSource(ctx context.Context) (oauth2.TokenSource, error) { + var token oauth2.Token + if c.Token != "" && !c.Expiry.IsZero() { + token = oauth2.Token{ + AccessToken: c.Token, + Expiry: c.Expiry, + TokenType: "Bearer", + } + } + if !tokenValid(token) && !c.canRefresh() { + return nil, errors.New("oauth2/google: Token should be created with fields to make it valid (`token` and `expiry`), or fields to allow it to refresh (`refresh_token`, `token_url`, `client_id`, `client_secret`).") + } + + ts := tokenSource{ + ctx: ctx, + conf: c, + } + + return oauth2.ReuseTokenSource(&token, ts), nil +} + +type tokenSource struct { + ctx context.Context + conf *Config +} + +func (ts tokenSource) Token() (*oauth2.Token, error) { + conf := ts.conf + if !conf.canRefresh() { + return nil, errors.New("oauth2/google: The credentials do not contain the necessary fields need to refresh the access token. You must specify refresh_token, token_url, client_id, and client_secret.") + } + + clientAuth := stsexchange.ClientAuthentication{ + AuthStyle: oauth2.AuthStyleInHeader, + ClientID: conf.ClientID, + ClientSecret: conf.ClientSecret, + } + + stsResponse, err := stsexchange.RefreshAccessToken(ts.ctx, conf.TokenURL, conf.RefreshToken, clientAuth, nil) + if err != nil { + return nil, err + } + if stsResponse.ExpiresIn < 0 { + return nil, errors.New("oauth2/google: got invalid expiry from security token service") + } + + if stsResponse.RefreshToken != "" { + conf.RefreshToken = stsResponse.RefreshToken + } + + token := &oauth2.Token{ + AccessToken: stsResponse.AccessToken, + Expiry: now().Add(time.Duration(stsResponse.ExpiresIn) * time.Second), + TokenType: "Bearer", + } + return token, nil +} diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/clientauth.go b/vendor/golang.org/x/oauth2/google/internal/stsexchange/clientauth.go similarity index 88% rename from vendor/golang.org/x/oauth2/google/internal/externalaccount/clientauth.go rename to vendor/golang.org/x/oauth2/google/internal/stsexchange/clientauth.go index 99987ce294571..ebd520eace550 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/clientauth.go +++ b/vendor/golang.org/x/oauth2/google/internal/stsexchange/clientauth.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package externalaccount +package stsexchange import ( "encoding/base64" @@ -12,8 +12,8 @@ import ( "golang.org/x/oauth2" ) -// clientAuthentication represents an OAuth client ID and secret and the mechanism for passing these credentials as stated in rfc6749#2.3.1. -type clientAuthentication struct { +// ClientAuthentication represents an OAuth client ID and secret and the mechanism for passing these credentials as stated in rfc6749#2.3.1. +type ClientAuthentication struct { // AuthStyle can be either basic or request-body AuthStyle oauth2.AuthStyle ClientID string @@ -23,7 +23,7 @@ type clientAuthentication struct { // InjectAuthentication is used to add authentication to a Secure Token Service exchange // request. It modifies either the passed url.Values or http.Header depending on the desired // authentication format. 
-func (c *clientAuthentication) InjectAuthentication(values url.Values, headers http.Header) { +func (c *ClientAuthentication) InjectAuthentication(values url.Values, headers http.Header) { if c.ClientID == "" || c.ClientSecret == "" || values == nil || headers == nil { return } diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/sts_exchange.go b/vendor/golang.org/x/oauth2/google/internal/stsexchange/sts_exchange.go similarity index 68% rename from vendor/golang.org/x/oauth2/google/internal/externalaccount/sts_exchange.go rename to vendor/golang.org/x/oauth2/google/internal/stsexchange/sts_exchange.go index e6fcae5fcbfd0..1a0bebd1595d2 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/sts_exchange.go +++ b/vendor/golang.org/x/oauth2/google/internal/stsexchange/sts_exchange.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package externalaccount +package stsexchange import ( "context" @@ -18,14 +18,17 @@ import ( "golang.org/x/oauth2" ) -// exchangeToken performs an oauth2 token exchange with the provided endpoint. +func defaultHeader() http.Header { + header := make(http.Header) + header.Add("Content-Type", "application/x-www-form-urlencoded") + return header +} + +// ExchangeToken performs an oauth2 token exchange with the provided endpoint. // The first 4 fields are all mandatory. headers can be used to pass additional // headers beyond the bare minimum required by the token exchange. options can // be used to pass additional JSON-structured options to the remote server. -func exchangeToken(ctx context.Context, endpoint string, request *stsTokenExchangeRequest, authentication clientAuthentication, headers http.Header, options map[string]interface{}) (*stsTokenExchangeResponse, error) { - - client := oauth2.NewClient(ctx, nil) - +func ExchangeToken(ctx context.Context, endpoint string, request *TokenExchangeRequest, authentication ClientAuthentication, headers http.Header, options map[string]interface{}) (*Response, error) { data := url.Values{} data.Set("audience", request.Audience) data.Set("grant_type", "urn:ietf:params:oauth:grant-type:token-exchange") @@ -41,13 +44,28 @@ func exchangeToken(ctx context.Context, endpoint string, request *stsTokenExchan data.Set("options", string(opts)) } + return makeRequest(ctx, endpoint, data, authentication, headers) +} + +func RefreshAccessToken(ctx context.Context, endpoint string, refreshToken string, authentication ClientAuthentication, headers http.Header) (*Response, error) { + data := url.Values{} + data.Set("grant_type", "refresh_token") + data.Set("refresh_token", refreshToken) + + return makeRequest(ctx, endpoint, data, authentication, headers) +} + +func makeRequest(ctx context.Context, endpoint string, data url.Values, authentication ClientAuthentication, headers http.Header) (*Response, error) { + if headers == nil { + headers = defaultHeader() + } + client := oauth2.NewClient(ctx, nil) authentication.InjectAuthentication(data, headers) encodedData := data.Encode() req, err := http.NewRequest("POST", endpoint, strings.NewReader(encodedData)) if err != nil { return nil, fmt.Errorf("oauth2/google: failed to properly build http request: %v", err) - } req = req.WithContext(ctx) for key, list := range headers { @@ -71,7 +89,7 @@ func exchangeToken(ctx context.Context, endpoint string, request *stsTokenExchan if c := resp.StatusCode; c < 200 || c > 299 { return nil, fmt.Errorf("oauth2/google: status code %d: %s", c, body) } - 
var stsResp stsTokenExchangeResponse + var stsResp Response err = json.Unmarshal(body, &stsResp) if err != nil { return nil, fmt.Errorf("oauth2/google: failed to unmarshal response body from Secure Token Server: %v", err) @@ -81,8 +99,8 @@ func exchangeToken(ctx context.Context, endpoint string, request *stsTokenExchan return &stsResp, nil } -// stsTokenExchangeRequest contains fields necessary to make an oauth2 token exchange. -type stsTokenExchangeRequest struct { +// TokenExchangeRequest contains fields necessary to make an oauth2 token exchange. +type TokenExchangeRequest struct { ActingParty struct { ActorToken string ActorTokenType string @@ -96,8 +114,8 @@ type stsTokenExchangeRequest struct { SubjectTokenType string } -// stsTokenExchangeResponse is used to decode the remote server response during an oauth2 token exchange. -type stsTokenExchangeResponse struct { +// Response is used to decode the remote server response during an oauth2 token exchange. +type Response struct { AccessToken string `json:"access_token"` IssuedTokenType string `json:"issued_token_type"` TokenType string `json:"token_type"` diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go index 58901bda53e5f..e83ddeef0fc29 100644 --- a/vendor/golang.org/x/oauth2/internal/token.go +++ b/vendor/golang.org/x/oauth2/internal/token.go @@ -18,6 +18,7 @@ import ( "strconv" "strings" "sync" + "sync/atomic" "time" ) @@ -115,41 +116,60 @@ const ( AuthStyleInHeader AuthStyle = 2 ) -// authStyleCache is the set of tokenURLs we've successfully used via +// LazyAuthStyleCache is a backwards compatibility compromise to let Configs +// have a lazily-initialized AuthStyleCache. +// +// The two users of this, oauth2.Config and oauth2/clientcredentials.Config, +// both would ideally just embed an unexported AuthStyleCache but because both +// were historically allowed to be copied by value we can't retroactively add an +// uncopyable Mutex to them. +// +// We could use an atomic.Pointer, but that was added recently enough (in Go +// 1.18) that we'd break Go 1.17 users where the tests as of 2023-08-03 +// still pass. By using an atomic.Value, it supports both Go 1.17 and +// copying by value, even if that's not ideal. +type LazyAuthStyleCache struct { + v atomic.Value // of *AuthStyleCache +} + +func (lc *LazyAuthStyleCache) Get() *AuthStyleCache { + if c, ok := lc.v.Load().(*AuthStyleCache); ok { + return c + } + c := new(AuthStyleCache) + if !lc.v.CompareAndSwap(nil, c) { + c = lc.v.Load().(*AuthStyleCache) + } + return c +} + +// AuthStyleCache is the set of tokenURLs we've successfully used via // RetrieveToken and which style auth we ended up using. // It's called a cache, but it doesn't (yet?) shrink. It's expected that // the set of OAuth2 servers a program contacts over time is fixed and // small. -var authStyleCache struct { - sync.Mutex - m map[string]AuthStyle // keyed by tokenURL -} - -// ResetAuthCache resets the global authentication style cache used -// for AuthStyleUnknown token requests. -func ResetAuthCache() { - authStyleCache.Lock() - defer authStyleCache.Unlock() - authStyleCache.m = nil +type AuthStyleCache struct { + mu sync.Mutex + m map[string]AuthStyle // keyed by tokenURL } // lookupAuthStyle reports which auth style we last used with tokenURL // when calling RetrieveToken and whether we have ever done so. 
-func lookupAuthStyle(tokenURL string) (style AuthStyle, ok bool) { - authStyleCache.Lock() - defer authStyleCache.Unlock() - style, ok = authStyleCache.m[tokenURL] +func (c *AuthStyleCache) lookupAuthStyle(tokenURL string) (style AuthStyle, ok bool) { + c.mu.Lock() + defer c.mu.Unlock() + style, ok = c.m[tokenURL] return } // setAuthStyle adds an entry to authStyleCache, documented above. -func setAuthStyle(tokenURL string, v AuthStyle) { - authStyleCache.Lock() - defer authStyleCache.Unlock() - if authStyleCache.m == nil { - authStyleCache.m = make(map[string]AuthStyle) +func (c *AuthStyleCache) setAuthStyle(tokenURL string, v AuthStyle) { + c.mu.Lock() + defer c.mu.Unlock() + if c.m == nil { + c.m = make(map[string]AuthStyle) } - authStyleCache.m[tokenURL] = v + c.m[tokenURL] = v } // newTokenRequest returns a new *http.Request to retrieve a new token @@ -189,10 +209,10 @@ func cloneURLValues(v url.Values) url.Values { return v2 } -func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values, authStyle AuthStyle) (*Token, error) { +func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values, authStyle AuthStyle, styleCache *AuthStyleCache) (*Token, error) { needsAuthStyleProbe := authStyle == 0 if needsAuthStyleProbe { - if style, ok := lookupAuthStyle(tokenURL); ok { + if style, ok := styleCache.lookupAuthStyle(tokenURL); ok { authStyle = style needsAuthStyleProbe = false } else { @@ -222,7 +242,7 @@ func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, token, err = doTokenRoundTrip(ctx, req) } if needsAuthStyleProbe && err == nil { - setAuthStyle(tokenURL, authStyle) + styleCache.setAuthStyle(tokenURL, authStyle) } // Don't overwrite `RefreshToken` with an empty value // if this was a token refreshing request. diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go index 9085fabe34eaf..90a2c3d6dcb1a 100644 --- a/vendor/golang.org/x/oauth2/oauth2.go +++ b/vendor/golang.org/x/oauth2/oauth2.go @@ -58,6 +58,10 @@ type Config struct { // Scope specifies optional requested permissions. Scopes []string + + // authStyleCache caches which auth style to use when Endpoint.AuthStyle is + // the zero value (AuthStyleAutoDetect). + authStyleCache internal.LazyAuthStyleCache } // A TokenSource is anything that can return a token. @@ -71,8 +75,9 @@ type TokenSource interface { // Endpoint represents an OAuth 2.0 provider's authorization and token // endpoint URLs. type Endpoint struct { - AuthURL string - TokenURL string + AuthURL string + DeviceAuthURL string + TokenURL string // AuthStyle optionally specifies how the endpoint wants the // client ID & client secret sent. The zero value means to @@ -139,15 +144,19 @@ func SetAuthURLParam(key, value string) AuthCodeOption { // AuthCodeURL returns a URL to OAuth 2.0 provider's consent page // that asks for permissions for the required scopes explicitly. // -// State is a token to protect the user from CSRF attacks. You must -// always provide a non-empty string and validate that it matches the -// state query parameter on your redirect callback. -// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info. +// State is an opaque value used by the client to maintain state between the +// request and callback. The authorization server includes this value when +// redirecting the user agent back to the client. // // Opts may include AccessTypeOnline or AccessTypeOffline, as well // as ApprovalForce. 
-// It can also be used to pass the PKCE challenge. -// See https://www.oauth.com/oauth2-servers/pkce/ for more info. +// +// To protect against CSRF attacks, opts should include a PKCE challenge +// (S256ChallengeOption). Not all servers support PKCE. An alternative is to +// generate a random state parameter and verify it after exchange. +// See https://datatracker.ietf.org/doc/html/rfc6749#section-10.12 (predating +// PKCE), https://www.oauth.com/oauth2-servers/pkce/ and +// https://www.ietf.org/archive/id/draft-ietf-oauth-v2-1-09.html#name-cross-site-request-forgery (describing both approaches) func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { var buf bytes.Buffer buf.WriteString(c.Endpoint.AuthURL) @@ -162,7 +171,6 @@ func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { v.Set("scope", strings.Join(c.Scopes, " ")) } if state != "" { - // TODO(light): Docs say never to omit state; don't allow empty. v.Set("state", state) } for _, opt := range opts { @@ -207,10 +215,11 @@ func (c *Config) PasswordCredentialsToken(ctx context.Context, username, passwor // The provided context optionally controls which HTTP client is used. See the HTTPClient variable. // // The code will be in the *http.Request.FormValue("code"). Before -// calling Exchange, be sure to validate FormValue("state"). +// calling Exchange, be sure to validate FormValue("state") if you are +// using it to protect against CSRF attacks. // -// Opts may include the PKCE verifier code if previously used in AuthCodeURL. -// See https://www.oauth.com/oauth2-servers/pkce/ for more info. +// If using PKCE to protect against CSRF attacks, opts should include a +// VerifierOption. func (c *Config) Exchange(ctx context.Context, code string, opts ...AuthCodeOption) (*Token, error) { v := url.Values{ "grant_type": {"authorization_code"}, diff --git a/vendor/golang.org/x/oauth2/pkce.go b/vendor/golang.org/x/oauth2/pkce.go new file mode 100644 index 0000000000000..50593b6dfec6b --- /dev/null +++ b/vendor/golang.org/x/oauth2/pkce.go @@ -0,0 +1,68 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +package oauth2 + +import ( + "crypto/rand" + "crypto/sha256" + "encoding/base64" + "net/url" +) + +const ( + codeChallengeKey = "code_challenge" + codeChallengeMethodKey = "code_challenge_method" + codeVerifierKey = "code_verifier" +) + +// GenerateVerifier generates a PKCE code verifier with 32 octets of randomness. +// This follows recommendations in RFC 7636. +// +// A fresh verifier should be generated for each authorization. +// S256ChallengeOption(verifier) should then be passed to Config.AuthCodeURL +// (or Config.DeviceAccess) and VerifierOption(verifier) to Config.Exchange +// (or Config.DeviceAccessToken). +func GenerateVerifier() string { + // "RECOMMENDED that the output of a suitable random number generator be + // used to create a 32-octet sequence. The octet sequence is then + // base64url-encoded to produce a 43-octet URL-safe string to use as the + // code verifier." + // https://datatracker.ietf.org/doc/html/rfc7636#section-4.1 + data := make([]byte, 32) + if _, err := rand.Read(data); err != nil { + panic(err) + } + return base64.RawURLEncoding.EncodeToString(data) +} + +// VerifierOption returns a PKCE code verifier AuthCodeOption. It should be +// passed to Config.Exchange or Config.DeviceAccessToken only. 
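+//
+// A minimal caller-side sketch of the PKCE round trip (conf, ctx, state, and
+// code are assumed caller-side values):
+//
+//	verifier := oauth2.GenerateVerifier()
+//	url := conf.AuthCodeURL(state, oauth2.S256ChallengeOption(verifier))
+//	// ...redirect the user to url, then receive code on the callback...
+//	tok, err := conf.Exchange(ctx, code, oauth2.VerifierOption(verifier))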
+func VerifierOption(verifier string) AuthCodeOption { + return setParam{k: codeVerifierKey, v: verifier} +} + +// S256ChallengeFromVerifier returns a PKCE code challenge derived from verifier with method S256. +// +// Prefer to use S256ChallengeOption where possible. +func S256ChallengeFromVerifier(verifier string) string { + sha := sha256.Sum256([]byte(verifier)) + return base64.RawURLEncoding.EncodeToString(sha[:]) +} + +// S256ChallengeOption derives a PKCE code challenge derived from verifier with +// method S256. It should be passed to Config.AuthCodeURL or Config.DeviceAccess +// only. +func S256ChallengeOption(verifier string) AuthCodeOption { + return challengeOption{ + challenge_method: "S256", + challenge: S256ChallengeFromVerifier(verifier), + } +} + +type challengeOption struct{ challenge_method, challenge string } + +func (p challengeOption) setValue(m url.Values) { + m.Set(codeChallengeMethodKey, p.challenge_method) + m.Set(codeChallengeKey, p.challenge) +} diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go index 5ffce9764be7f..5bbb332174885 100644 --- a/vendor/golang.org/x/oauth2/token.go +++ b/vendor/golang.org/x/oauth2/token.go @@ -164,7 +164,7 @@ func tokenFromInternal(t *internal.Token) *Token { // This token is then mapped from *internal.Token into an *oauth2.Token which is returned along // with an error.. func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) { - tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v, internal.AuthStyle(c.Endpoint.AuthStyle)) + tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v, internal.AuthStyle(c.Endpoint.AuthStyle), c.authStyleCache.Get()) if err != nil { if rErr, ok := err.(*internal.RetrieveError); ok { return nil, (*RetrieveError)(rErr) diff --git a/vendor/golang.org/x/sync/errgroup/go120.go b/vendor/golang.org/x/sync/errgroup/go120.go index 7d419d3760ceb..f93c740b638ca 100644 --- a/vendor/golang.org/x/sync/errgroup/go120.go +++ b/vendor/golang.org/x/sync/errgroup/go120.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build go1.20 -// +build go1.20 package errgroup diff --git a/vendor/golang.org/x/sync/errgroup/pre_go120.go b/vendor/golang.org/x/sync/errgroup/pre_go120.go index 1795c18ace06f..88ce33434e238 100644 --- a/vendor/golang.org/x/sync/errgroup/pre_go120.go +++ b/vendor/golang.org/x/sync/errgroup/pre_go120.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build !go1.20 -// +build !go1.20 package errgroup diff --git a/vendor/golang.org/x/sync/singleflight/singleflight.go b/vendor/golang.org/x/sync/singleflight/singleflight.go index 8473fb7922c16..4051830982ad3 100644 --- a/vendor/golang.org/x/sync/singleflight/singleflight.go +++ b/vendor/golang.org/x/sync/singleflight/singleflight.go @@ -31,6 +31,15 @@ func (p *panicError) Error() string { return fmt.Sprintf("%v\n\n%s", p.value, p.stack) } +func (p *panicError) Unwrap() error { + err, ok := p.value.(error) + if !ok { + return nil + } + + return err +} + func newPanicError(v interface{}) error { stack := debug.Stack() diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 6202638bae86f..fdcaa974d23b3 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -248,6 +248,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -283,10 +284,6 @@ struct ltchars { #include #endif -#ifndef MSG_FASTOPEN -#define MSG_FASTOPEN 0x20000000 -#endif - #ifndef PTRACE_GETREGS #define PTRACE_GETREGS 0xc #endif @@ -295,14 +292,6 @@ struct ltchars { #define PTRACE_SETREGS 0xd #endif -#ifndef SOL_NETLINK -#define SOL_NETLINK 270 -#endif - -#ifndef SOL_SMC -#define SOL_SMC 286 -#endif - #ifdef SOL_BLUETOOTH // SPARC includes this in /usr/include/sparc64-linux-gnu/bits/socket.h // but it is already in bluetooth_linux.go @@ -319,10 +308,23 @@ struct ltchars { #undef TIPC_WAIT_FOREVER #define TIPC_WAIT_FOREVER 0xffffffff -// Copied from linux/l2tp.h -// Including linux/l2tp.h here causes conflicts between linux/in.h -// and netinet/in.h included via net/route.h above. -#define IPPROTO_L2TP 115 +// Copied from linux/netfilter/nf_nat.h +// Including linux/netfilter/nf_nat.h here causes conflicts between linux/in.h +// and netinet/in.h. +#define NF_NAT_RANGE_MAP_IPS (1 << 0) +#define NF_NAT_RANGE_PROTO_SPECIFIED (1 << 1) +#define NF_NAT_RANGE_PROTO_RANDOM (1 << 2) +#define NF_NAT_RANGE_PERSISTENT (1 << 3) +#define NF_NAT_RANGE_PROTO_RANDOM_FULLY (1 << 4) +#define NF_NAT_RANGE_PROTO_OFFSET (1 << 5) +#define NF_NAT_RANGE_NETMAP (1 << 6) +#define NF_NAT_RANGE_PROTO_RANDOM_ALL \ + (NF_NAT_RANGE_PROTO_RANDOM | NF_NAT_RANGE_PROTO_RANDOM_FULLY) +#define NF_NAT_RANGE_MASK \ + (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED | \ + NF_NAT_RANGE_PROTO_RANDOM | NF_NAT_RANGE_PERSISTENT | \ + NF_NAT_RANGE_PROTO_RANDOM_FULLY | NF_NAT_RANGE_PROTO_OFFSET | \ + NF_NAT_RANGE_NETMAP) // Copied from linux/hid.h. // Keep in sync with the size of the referenced fields. 
@@ -582,7 +584,7 @@ ccflags="$@" $2 ~ /^KEY_(SPEC|REQKEY_DEFL)_/ || $2 ~ /^KEYCTL_/ || $2 ~ /^PERF_/ || - $2 ~ /^SECCOMP_MODE_/ || + $2 ~ /^SECCOMP_/ || $2 ~ /^SEEK_/ || $2 ~ /^SCHED_/ || $2 ~ /^SPLICE_/ || @@ -603,6 +605,9 @@ ccflags="$@" $2 ~ /^FSOPT_/ || $2 ~ /^WDIO[CFS]_/ || $2 ~ /^NFN/ || + $2 !~ /^NFT_META_IIFTYPE/ && + $2 ~ /^NFT_/ || + $2 ~ /^NF_NAT_/ || $2 ~ /^XDP_/ || $2 ~ /^RWF_/ || $2 ~ /^(HDIO|WIN|SMART)_/ || diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index c73cfe2f10b77..36bf8399f4fa9 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -1785,6 +1785,8 @@ const ( LANDLOCK_ACCESS_FS_REMOVE_FILE = 0x20 LANDLOCK_ACCESS_FS_TRUNCATE = 0x4000 LANDLOCK_ACCESS_FS_WRITE_FILE = 0x2 + LANDLOCK_ACCESS_NET_BIND_TCP = 0x1 + LANDLOCK_ACCESS_NET_CONNECT_TCP = 0x2 LANDLOCK_CREATE_RULESET_VERSION = 0x1 LINUX_REBOOT_CMD_CAD_OFF = 0x0 LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef @@ -2127,6 +2129,60 @@ const ( NFNL_SUBSYS_QUEUE = 0x3 NFNL_SUBSYS_ULOG = 0x4 NFS_SUPER_MAGIC = 0x6969 + NFT_CHAIN_FLAGS = 0x7 + NFT_CHAIN_MAXNAMELEN = 0x100 + NFT_CT_MAX = 0x17 + NFT_DATA_RESERVED_MASK = 0xffffff00 + NFT_DATA_VALUE_MAXLEN = 0x40 + NFT_EXTHDR_OP_MAX = 0x4 + NFT_FIB_RESULT_MAX = 0x3 + NFT_INNER_MASK = 0xf + NFT_LOGLEVEL_MAX = 0x8 + NFT_NAME_MAXLEN = 0x100 + NFT_NG_MAX = 0x1 + NFT_OBJECT_CONNLIMIT = 0x5 + NFT_OBJECT_COUNTER = 0x1 + NFT_OBJECT_CT_EXPECT = 0x9 + NFT_OBJECT_CT_HELPER = 0x3 + NFT_OBJECT_CT_TIMEOUT = 0x7 + NFT_OBJECT_LIMIT = 0x4 + NFT_OBJECT_MAX = 0xa + NFT_OBJECT_QUOTA = 0x2 + NFT_OBJECT_SECMARK = 0x8 + NFT_OBJECT_SYNPROXY = 0xa + NFT_OBJECT_TUNNEL = 0x6 + NFT_OBJECT_UNSPEC = 0x0 + NFT_OBJ_MAXNAMELEN = 0x100 + NFT_OSF_MAXGENRELEN = 0x10 + NFT_QUEUE_FLAG_BYPASS = 0x1 + NFT_QUEUE_FLAG_CPU_FANOUT = 0x2 + NFT_QUEUE_FLAG_MASK = 0x3 + NFT_REG32_COUNT = 0x10 + NFT_REG32_SIZE = 0x4 + NFT_REG_MAX = 0x4 + NFT_REG_SIZE = 0x10 + NFT_REJECT_ICMPX_MAX = 0x3 + NFT_RT_MAX = 0x4 + NFT_SECMARK_CTX_MAXLEN = 0x100 + NFT_SET_MAXNAMELEN = 0x100 + NFT_SOCKET_MAX = 0x3 + NFT_TABLE_F_MASK = 0x3 + NFT_TABLE_MAXNAMELEN = 0x100 + NFT_TRACETYPE_MAX = 0x3 + NFT_TUNNEL_F_MASK = 0x7 + NFT_TUNNEL_MAX = 0x1 + NFT_TUNNEL_MODE_MAX = 0x2 + NFT_USERDATA_MAXLEN = 0x100 + NFT_XFRM_KEY_MAX = 0x6 + NF_NAT_RANGE_MAP_IPS = 0x1 + NF_NAT_RANGE_MASK = 0x7f + NF_NAT_RANGE_NETMAP = 0x40 + NF_NAT_RANGE_PERSISTENT = 0x8 + NF_NAT_RANGE_PROTO_OFFSET = 0x20 + NF_NAT_RANGE_PROTO_RANDOM = 0x4 + NF_NAT_RANGE_PROTO_RANDOM_ALL = 0x14 + NF_NAT_RANGE_PROTO_RANDOM_FULLY = 0x10 + NF_NAT_RANGE_PROTO_SPECIFIED = 0x2 NILFS_SUPER_MAGIC = 0x3434 NL0 = 0x0 NL1 = 0x100 @@ -2411,6 +2467,7 @@ const ( PR_MCE_KILL_GET = 0x22 PR_MCE_KILL_LATE = 0x0 PR_MCE_KILL_SET = 0x1 + PR_MDWE_NO_INHERIT = 0x2 PR_MDWE_REFUSE_EXEC_GAIN = 0x1 PR_MPX_DISABLE_MANAGEMENT = 0x2c PR_MPX_ENABLE_MANAGEMENT = 0x2b @@ -2615,8 +2672,9 @@ const ( RTAX_FEATURES = 0xc RTAX_FEATURE_ALLFRAG = 0x8 RTAX_FEATURE_ECN = 0x1 - RTAX_FEATURE_MASK = 0xf + RTAX_FEATURE_MASK = 0x1f RTAX_FEATURE_SACK = 0x2 + RTAX_FEATURE_TCP_USEC_TS = 0x10 RTAX_FEATURE_TIMESTAMP = 0x4 RTAX_HOPLIMIT = 0xa RTAX_INITCWND = 0xb @@ -2859,9 +2917,38 @@ const ( SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x1d SC_LOG_FLUSH = 0x100000 + SECCOMP_ADDFD_FLAG_SEND = 0x2 + SECCOMP_ADDFD_FLAG_SETFD = 0x1 + SECCOMP_FILTER_FLAG_LOG = 0x2 + SECCOMP_FILTER_FLAG_NEW_LISTENER = 0x8 + SECCOMP_FILTER_FLAG_SPEC_ALLOW = 0x4 + SECCOMP_FILTER_FLAG_TSYNC = 0x1 + SECCOMP_FILTER_FLAG_TSYNC_ESRCH = 0x10 + SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV = 0x20 + 
SECCOMP_GET_ACTION_AVAIL = 0x2 + SECCOMP_GET_NOTIF_SIZES = 0x3 + SECCOMP_IOCTL_NOTIF_RECV = 0xc0502100 + SECCOMP_IOCTL_NOTIF_SEND = 0xc0182101 + SECCOMP_IOC_MAGIC = '!' SECCOMP_MODE_DISABLED = 0x0 SECCOMP_MODE_FILTER = 0x2 SECCOMP_MODE_STRICT = 0x1 + SECCOMP_RET_ACTION = 0x7fff0000 + SECCOMP_RET_ACTION_FULL = 0xffff0000 + SECCOMP_RET_ALLOW = 0x7fff0000 + SECCOMP_RET_DATA = 0xffff + SECCOMP_RET_ERRNO = 0x50000 + SECCOMP_RET_KILL = 0x0 + SECCOMP_RET_KILL_PROCESS = 0x80000000 + SECCOMP_RET_KILL_THREAD = 0x0 + SECCOMP_RET_LOG = 0x7ffc0000 + SECCOMP_RET_TRACE = 0x7ff00000 + SECCOMP_RET_TRAP = 0x30000 + SECCOMP_RET_USER_NOTIF = 0x7fc00000 + SECCOMP_SET_MODE_FILTER = 0x1 + SECCOMP_SET_MODE_STRICT = 0x0 + SECCOMP_USER_NOTIF_FD_SYNC_WAKE_UP = 0x1 + SECCOMP_USER_NOTIF_FLAG_CONTINUE = 0x1 SECRETMEM_MAGIC = 0x5345434d SECURITYFS_MAGIC = 0x73636673 SEEK_CUR = 0x1 @@ -3021,6 +3108,7 @@ const ( SOL_TIPC = 0x10f SOL_TLS = 0x11a SOL_UDP = 0x11 + SOL_VSOCK = 0x11f SOL_X25 = 0x106 SOL_XDP = 0x11b SOMAXCONN = 0x1000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 4920821cf3b23..42ff8c3c1b064 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -281,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index a0c1e411275cc..dca436004fa4d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -282,6 +282,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index c63985560f61f..5cca668ac302f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -288,6 +288,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 47cc62e25c148..d8cae6d153403 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -278,6 +278,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 27ac4a09e22a6..28e39afdcb4af 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -275,6 +275,9 @@ const 
( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 54694642a5de9..cd66e92cb4264 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -281,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x80 SIOCATMARK = 0x40047307 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 3adb81d75822c..c1595eba78e34 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -281,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x80 SIOCATMARK = 0x40047307 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 2dfe98f0d1b15..ee9456b0da743 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -281,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x80 SIOCATMARK = 0x40047307 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index f5398f84f041b..8cfca81e1b566 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -281,6 +281,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x80 SIOCATMARK = 0x40047307 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index c54f152d68fd2..60b0deb3af770 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -336,6 +336,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 76057dc72fb5d..f90aa7281bfb2 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -340,6 +340,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + 
SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index e0c3725e2b89c..ba9e01503383a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -340,6 +340,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 18f2813ed54b7..07cdfd6e9fd3b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -272,6 +272,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 11619d4ec88f2..2f1dd214a74ef 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -344,6 +344,9 @@ const ( SCM_TIMESTAMPNS = 0x23 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104 SFD_CLOEXEC = 0x80000 SFD_NONBLOCK = 0x800 SIOCATMARK = 0x8905 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 396d994da79c4..f40519d901801 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -335,6 +335,9 @@ const ( SCM_TIMESTAMPNS = 0x21 SCM_TXTIME = 0x3f SCM_WIFI_STATUS = 0x25 + SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 + SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102 + SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104 SFD_CLOEXEC = 0x400000 SFD_NONBLOCK = 0x4000 SF_FP = 0x38 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index a1d061597ccc7..9dc42410b78b1 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -2297,5 +2297,3 @@ func unveil(path *byte, flags *byte) (err error) { var libc_unveil_trampoline_addr uintptr //go:cgo_import_dynamic libc_unveil unveil "libc.so" - - diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index 5b2a74097786b..0d3a0751cd435 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -2297,5 +2297,3 @@ func unveil(path *byte, flags *byte) (err error) { var libc_unveil_trampoline_addr uintptr //go:cgo_import_dynamic libc_unveil unveil "libc.so" - - diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index f6eda1344a83e..c39f7776db33f 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ 
b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -2297,5 +2297,3 @@ func unveil(path *byte, flags *byte) (err error) { var libc_unveil_trampoline_addr uintptr //go:cgo_import_dynamic libc_unveil unveil "libc.so" - - diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go index 55df20ae9d8d1..57571d072fe6e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go @@ -2297,5 +2297,3 @@ func unveil(path *byte, flags *byte) (err error) { var libc_unveil_trampoline_addr uintptr //go:cgo_import_dynamic libc_unveil unveil "libc.so" - - diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go index 8c1155cbc0874..e62963e67e204 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go @@ -2297,5 +2297,3 @@ func unveil(path *byte, flags *byte) (err error) { var libc_unveil_trampoline_addr uintptr //go:cgo_import_dynamic libc_unveil unveil "libc.so" - - diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go index 7cc80c58d985e..00831354c82f4 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go @@ -2297,5 +2297,3 @@ func unveil(path *byte, flags *byte) (err error) { var libc_unveil_trampoline_addr uintptr //go:cgo_import_dynamic libc_unveil unveil "libc.so" - - diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go index 0688737f49443..79029ed584825 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go @@ -2297,5 +2297,3 @@ func unveil(path *byte, flags *byte) (err error) { var libc_unveil_trampoline_addr uintptr //go:cgo_import_dynamic libc_unveil unveil "libc.so" - - diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index fcf3ecbddee10..0cc3ce496e229 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -448,4 +448,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index f56dc2504ae14..856d92d69ef91 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -371,4 +371,7 @@ const ( SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 974bf246767e3..8d467094cf57a 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -412,4 +412,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go 
b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 39a2739e23104..edc173244d0d8 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -315,4 +315,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index cf9c9d77e10fb..445eba2061554 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -309,4 +309,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index 10b7362ef4424..adba01bca7015 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -432,4 +432,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 4450 SYS_CACHESTAT = 4451 SYS_FCHMODAT2 = 4452 + SYS_MAP_SHADOW_STACK = 4453 + SYS_FUTEX_WAKE = 4454 + SYS_FUTEX_WAIT = 4455 + SYS_FUTEX_REQUEUE = 4456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index cd4d8b4fd35e9..014c4e9c7a75b 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -362,4 +362,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 5450 SYS_CACHESTAT = 5451 SYS_FCHMODAT2 = 5452 + SYS_MAP_SHADOW_STACK = 5453 + SYS_FUTEX_WAKE = 5454 + SYS_FUTEX_WAIT = 5455 + SYS_FUTEX_REQUEUE = 5456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index 2c0efca818b32..ccc97d74d05dd 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -362,4 +362,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 5450 SYS_CACHESTAT = 5451 SYS_FCHMODAT2 = 5452 + SYS_MAP_SHADOW_STACK = 5453 + SYS_FUTEX_WAKE = 5454 + SYS_FUTEX_WAIT = 5455 + SYS_FUTEX_REQUEUE = 5456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index a72e31d391d5d..ec2b64a95d743 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -432,4 +432,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 4450 SYS_CACHESTAT = 4451 SYS_FCHMODAT2 = 4452 + SYS_MAP_SHADOW_STACK = 4453 + SYS_FUTEX_WAKE = 4454 + SYS_FUTEX_WAIT = 4455 + SYS_FUTEX_REQUEUE = 4456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index c7d1e374713c0..21a839e338b30 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -439,4 +439,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index f4d4838c870dd..c11121ec3b4dd 100644 --- 
a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -411,4 +411,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index b64f0e59114d0..909b631fcb45a 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -411,4 +411,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 95711195a0648..e49bed16ea6b6 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -316,4 +316,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index f94e943bc4f57..66017d2d32b35 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -377,4 +377,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index ba0c2bc5154a7..47bab18dcedb1 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -390,4 +390,8 @@ const ( SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 + SYS_FUTEX_WAKE = 454 + SYS_FUTEX_WAIT = 455 + SYS_FUTEX_REQUEUE = 456 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index bbf8399ff5860..dc0c955eecdf8 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -174,7 +174,8 @@ type FscryptPolicyV2 struct { Contents_encryption_mode uint8 Filenames_encryption_mode uint8 Flags uint8 - _ [4]uint8 + Log2_data_unit_size uint8 + _ [3]uint8 Master_key_identifier [16]uint8 } @@ -455,60 +456,63 @@ type Ucred struct { } type TCPInfo struct { - State uint8 - Ca_state uint8 - Retransmits uint8 - Probes uint8 - Backoff uint8 - Options uint8 - Rto uint32 - Ato uint32 - Snd_mss uint32 - Rcv_mss uint32 - Unacked uint32 - Sacked uint32 - Lost uint32 - Retrans uint32 - Fackets uint32 - Last_data_sent uint32 - Last_ack_sent uint32 - Last_data_recv uint32 - Last_ack_recv uint32 - Pmtu uint32 - Rcv_ssthresh uint32 - Rtt uint32 - Rttvar uint32 - Snd_ssthresh uint32 - Snd_cwnd uint32 - Advmss uint32 - Reordering uint32 - Rcv_rtt uint32 - Rcv_space uint32 - Total_retrans uint32 - Pacing_rate uint64 - Max_pacing_rate uint64 - Bytes_acked uint64 - Bytes_received uint64 - Segs_out uint32 - Segs_in uint32 - Notsent_bytes uint32 - Min_rtt uint32 - Data_segs_in uint32 - Data_segs_out uint32 - 
Delivery_rate uint64 - Busy_time uint64 - Rwnd_limited uint64 - Sndbuf_limited uint64 - Delivered uint32 - Delivered_ce uint32 - Bytes_sent uint64 - Bytes_retrans uint64 - Dsack_dups uint32 - Reord_seen uint32 - Rcv_ooopack uint32 - Snd_wnd uint32 - Rcv_wnd uint32 - Rehash uint32 + State uint8 + Ca_state uint8 + Retransmits uint8 + Probes uint8 + Backoff uint8 + Options uint8 + Rto uint32 + Ato uint32 + Snd_mss uint32 + Rcv_mss uint32 + Unacked uint32 + Sacked uint32 + Lost uint32 + Retrans uint32 + Fackets uint32 + Last_data_sent uint32 + Last_ack_sent uint32 + Last_data_recv uint32 + Last_ack_recv uint32 + Pmtu uint32 + Rcv_ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Snd_ssthresh uint32 + Snd_cwnd uint32 + Advmss uint32 + Reordering uint32 + Rcv_rtt uint32 + Rcv_space uint32 + Total_retrans uint32 + Pacing_rate uint64 + Max_pacing_rate uint64 + Bytes_acked uint64 + Bytes_received uint64 + Segs_out uint32 + Segs_in uint32 + Notsent_bytes uint32 + Min_rtt uint32 + Data_segs_in uint32 + Data_segs_out uint32 + Delivery_rate uint64 + Busy_time uint64 + Rwnd_limited uint64 + Sndbuf_limited uint64 + Delivered uint32 + Delivered_ce uint32 + Bytes_sent uint64 + Bytes_retrans uint64 + Dsack_dups uint32 + Reord_seen uint32 + Rcv_ooopack uint32 + Snd_wnd uint32 + Rcv_wnd uint32 + Rehash uint32 + Total_rto uint16 + Total_rto_recoveries uint16 + Total_rto_time uint32 } type CanFilter struct { @@ -551,7 +555,7 @@ const ( SizeofIPv6MTUInfo = 0x20 SizeofICMPv6Filter = 0x20 SizeofUcred = 0xc - SizeofTCPInfo = 0xf0 + SizeofTCPInfo = 0xf8 SizeofCanFilter = 0x8 SizeofTCPRepairOpt = 0x8 ) @@ -3399,7 +3403,7 @@ const ( DEVLINK_PORT_FN_ATTR_STATE = 0x2 DEVLINK_PORT_FN_ATTR_OPSTATE = 0x3 DEVLINK_PORT_FN_ATTR_CAPS = 0x4 - DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x4 + DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x5 ) type FsverityDigest struct { @@ -4183,7 +4187,8 @@ const ( ) type LandlockRulesetAttr struct { - Access_fs uint64 + Access_fs uint64 + Access_net uint64 } type LandlockPathBeneathAttr struct { @@ -5134,7 +5139,7 @@ const ( NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf - NL80211_FREQUENCY_ATTR_MAX = 0x1b + NL80211_FREQUENCY_ATTR_MAX = 0x1c NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6 NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11 NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc @@ -5547,7 +5552,7 @@ const ( NL80211_REGDOM_TYPE_CUSTOM_WORLD = 0x2 NL80211_REGDOM_TYPE_INTERSECTION = 0x3 NL80211_REGDOM_TYPE_WORLD = 0x1 - NL80211_REG_RULE_ATTR_MAX = 0x7 + NL80211_REG_RULE_ATTR_MAX = 0x8 NL80211_REKEY_DATA_AKM = 0x4 NL80211_REKEY_DATA_KCK = 0x2 NL80211_REKEY_DATA_KEK = 0x1 diff --git a/vendor/golang.org/x/sys/windows/env_windows.go b/vendor/golang.org/x/sys/windows/env_windows.go index b8ad192506890..d4577a4238871 100644 --- a/vendor/golang.org/x/sys/windows/env_windows.go +++ b/vendor/golang.org/x/sys/windows/env_windows.go @@ -37,14 +37,17 @@ func (token Token) Environ(inheritExisting bool) (env []string, err error) { return nil, err } defer DestroyEnvironmentBlock(block) - blockp := unsafe.Pointer(block) - for { - entry := UTF16PtrToString((*uint16)(blockp)) - if len(entry) == 0 { - break + size := unsafe.Sizeof(*block) + for *block != 0 { + // find NUL terminator + end := unsafe.Pointer(block) + for *(*uint16)(end) != 0 { + end = unsafe.Add(end, size) } - env = append(env, entry) - blockp = unsafe.Add(blockp, 2*(len(entry)+1)) + + entry := unsafe.Slice(block, (uintptr(end)-uintptr(unsafe.Pointer(block)))/size) + env = append(env, UTF16ToString(entry)) + block 
= (*uint16)(unsafe.Add(end, size)) } return env, nil } diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 47dc57967690b..6395a031d45d8 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -125,8 +125,7 @@ func UTF16PtrToString(p *uint16) string { for ptr := unsafe.Pointer(p); *(*uint16)(ptr) != 0; n++ { ptr = unsafe.Pointer(uintptr(ptr) + unsafe.Sizeof(*p)) } - - return string(utf16.Decode(unsafe.Slice(p, n))) + return UTF16ToString(unsafe.Slice(p, n)) } func Getpagesize() int { return 4096 } @@ -194,6 +193,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys GetComputerName(buf *uint16, n *uint32) (err error) = GetComputerNameW //sys GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) = GetComputerNameExW //sys SetEndOfFile(handle Handle) (err error) +//sys SetFileValidData(handle Handle, validDataLength int64) (err error) //sys GetSystemTimeAsFileTime(time *Filetime) //sys GetSystemTimePreciseAsFileTime(time *Filetime) //sys GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) [failretval==0xffffffff] diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 146a1f0196f93..e8791c82c30f4 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -342,6 +342,7 @@ var ( procSetDefaultDllDirectories = modkernel32.NewProc("SetDefaultDllDirectories") procSetDllDirectoryW = modkernel32.NewProc("SetDllDirectoryW") procSetEndOfFile = modkernel32.NewProc("SetEndOfFile") + procSetFileValidData = modkernel32.NewProc("SetFileValidData") procSetEnvironmentVariableW = modkernel32.NewProc("SetEnvironmentVariableW") procSetErrorMode = modkernel32.NewProc("SetErrorMode") procSetEvent = modkernel32.NewProc("SetEvent") @@ -2988,6 +2989,14 @@ func SetEndOfFile(handle Handle) (err error) { return } +func SetFileValidData(handle Handle, validDataLength int64) (err error) { + r1, _, e1 := syscall.Syscall(procSetFileValidData.Addr(), 2, uintptr(handle), uintptr(validDataLength), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func SetEnvironmentVariable(name *uint16, value *uint16) (err error) { r1, _, e1 := syscall.Syscall(procSetEnvironmentVariableW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value)), 0) if r1 == 0 { diff --git a/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-api.json b/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-api.json index f6b5ecf47cf0c..b4b4c615ddab3 100644 --- a/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-api.json +++ b/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-api.json @@ -624,7 +624,7 @@ ] }, "search": { - "description": "Searches Organization resources that are visible to the user and satisfy the specified filter. This method returns Organizations in an unspecified order. New Organizations do not necessarily appear at the end of the results. Search will only return organizations on which the user has the permission `resourcemanager.organizations.get`", + "description": "Searches Organization resources that are visible to the user and satisfy the specified filter. This method returns Organizations in an unspecified order. New Organizations do not necessarily appear at the end of the results. 
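The env_windows.go hunk above walks a UTF-16 environment block entry by entry until it reaches the empty entry that terminates the block. A minimal, platform-neutral sketch of the same walk, assuming an already-decoded []uint16 block and an illustrative helper name (decodeEnvBlock):

package main

import (
	"fmt"
	"unicode/utf16"
)

// decodeEnvBlock walks a UTF-16 block of NUL-terminated entries that ends
// with an empty entry (a double NUL), the layout environment blocks use on
// Windows. The helper name and sample data are illustrative only.
func decodeEnvBlock(block []uint16) []string {
	var env []string
	for i := 0; i < len(block) && block[i] != 0; {
		// Find the NUL that terminates the current entry.
		end := i
		for end < len(block) && block[end] != 0 {
			end++
		}
		env = append(env, string(utf16.Decode(block[i:end])))
		i = end + 1 // skip the terminator and move to the next entry
	}
	return env
}

func main() {
	// "FOO=bar\x00BAZ=qux\x00\x00" encoded as UTF-16 code units.
	block := utf16.Encode([]rune("FOO=bar\x00BAZ=qux\x00\x00"))
	fmt.Println(decodeEnvBlock(block)) // [FOO=bar BAZ=qux]
}

The vendored change follows the same idea, but operates directly on the raw *uint16 block with unsafe pointer arithmetic rather than on a slice.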
Search will only return organizations on which the user has the permission `resourcemanager.organizations.get` or has super admin privileges.", "flatPath": "v1/organizations:search", "httpMethod": "POST", "id": "cloudresourcemanager.organizations.search", @@ -1032,7 +1032,7 @@ ] }, "setIamPolicy": { - "description": "Sets the IAM access control policy for the specified Project. CAUTION: This method will replace the existing policy, and cannot be used to append additional IAM settings. NOTE: Removing service accounts from policies or changing their roles can render services completely inoperable. It is important to understand how the service account is being used before removing or updating its roles. For additional information about `resource` (e.g. my-project-id) structure and identification, see [Resource Names](https://cloud.google.com/apis/design/resource_names). The following constraints apply when using `setIamPolicy()`: + Project does not support `allUsers` and `allAuthenticatedUsers` as `members` in a `Binding` of a `Policy`. + The owner role can be granted to a `user`, `serviceAccount`, or a group that is part of an organization. For example, group@myownpersonaldomain.com could be added as an owner to a project in the myownpersonaldomain.com organization, but not the examplepetstore.com organization. + Service accounts can be made owners of a project directly without any restrictions. However, to be added as an owner, a user must be invited via Cloud Platform console and must accept the invitation. + A user cannot be granted the owner role using `setIamPolicy()`. The user must be granted the owner role using the Cloud Platform Console and must explicitly accept the invitation. + You can only grant ownership of a project to a member by using the GCP Console. Inviting a member will deliver an invitation email that they must accept. An invitation email is not generated if you are granting a role other than owner, or if both the member you are inviting and the project are part of your organization. + If the project is not part of an organization, there must be at least one owner who has accepted the Terms of Service (ToS) agreement in the policy. Calling `setIamPolicy()` to remove the last ToS-accepted owner from the policy will fail. This restriction also applies to legacy projects that no longer have owners who have accepted the ToS. Edits to IAM policies will be rejected until the lack of a ToS-accepting owner is rectified. If the project is part of an organization, you can remove all owners, potentially making the organization inaccessible. Authorization requires the Google IAM permission `resourcemanager.projects.setIamPolicy` on the project", + "description": "Sets the IAM access control policy for the specified Project. CAUTION: This method will replace the existing policy, and cannot be used to append additional IAM settings. NOTE: Removing service accounts from policies or changing their roles can render services completely inoperable. It is important to understand how the service account is being used before removing or updating its roles. For additional information about `resource` (e.g. my-project-id) structure and identification, see [Resource Names](https://cloud.google.com/apis/design/resource_names). The following constraints apply when using `setIamPolicy()`: + Project does not support `allUsers` and `allAuthenticatedUsers` as `members` in a `Binding` of a `Policy`. 
+ The owner role can be granted to a `user`, `serviceAccount`, or a group that is part of an organization. For example, group@myownpersonaldomain.com could be added as an owner to a project in the myownpersonaldomain.com organization, but not the examplepetstore.com organization. + Service accounts can be made owners of a project directly without any restrictions. However, to be added as an owner, a user must be invited via Cloud Platform console and must accept the invitation. + A user cannot be granted the owner role using `setIamPolicy()`. The user must be granted the owner role using the Cloud Platform Console and must explicitly accept the invitation. + You can only grant ownership of a project to a member by using the Google Cloud console. Inviting a member will deliver an invitation email that they must accept. An invitation email is not generated if you are granting a role other than owner, or if both the member you are inviting and the project are part of your organization. + If the project is not part of an organization, there must be at least one owner who has accepted the Terms of Service (ToS) agreement in the policy. Calling `setIamPolicy()` to remove the last ToS-accepted owner from the policy will fail. This restriction also applies to legacy projects that no longer have owners who have accepted the ToS. Edits to IAM policies will be rejected until the lack of a ToS-accepting owner is rectified. If the project is part of an organization, you can remove all owners, potentially making the organization inaccessible. Authorization requires the Google IAM permission `resourcemanager.projects.setIamPolicy` on the project", "flatPath": "v1/projects/{resource}:setIamPolicy", "httpMethod": "POST", "id": "cloudresourcemanager.projects.setIamPolicy", @@ -1171,7 +1171,7 @@ } } }, - "revision": "20230129", + "revision": "20231022", "rootUrl": "https://cloudresourcemanager.googleapis.com/", "schemas": { "Ancestor": { @@ -1902,7 +1902,7 @@ "description": "Properties of the object. Contains field @type with type URL.", "type": "any" }, - "description": "The normal response of the operation in case of success. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.", + "description": "The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.", "type": "object" } }, @@ -1956,7 +1956,7 @@ "type": "string" }, "displayName": { - "description": "A human-readable string that refers to the Organization in the GCP Console UI. This string is set by the server and cannot be changed. The string will be set to the primary domain (for example, \"google.com\") of the G Suite customer that owns the organization.", + "description": "A human-readable string that refers to the Organization in the Google Cloud console. 
This string is set by the server and cannot be changed. The string will be set to the primary domain (for example, \"google.com\") of the G Suite customer that owns the organization.", "type": "string" }, "lifecycleState": { @@ -1996,7 +1996,7 @@ "type": "object" }, "Policy": { - "description": "An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources. A `Policy` is a collection of `bindings`. A `binding` binds one or more `members`, or principals, to a single `role`. Principals can be user accounts, service accounts, Google groups, and domains (such as G Suite). A `role` is a named list of permissions; each `role` can be an IAM predefined role or a user-created custom role. For some types of Google Cloud resources, a `binding` can also specify a `condition`, which is a logical expression that allows access to a resource only if the expression evaluates to `true`. A condition can add constraints based on attributes of the request, the resource, or both. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). **JSON example:** { \"bindings\": [ { \"role\": \"roles/resourcemanager.organizationAdmin\", \"members\": [ \"user:mike@example.com\", \"group:admins@example.com\", \"domain:google.com\", \"serviceAccount:my-project-id@appspot.gserviceaccount.com\" ] }, { \"role\": \"roles/resourcemanager.organizationViewer\", \"members\": [ \"user:eve@example.com\" ], \"condition\": { \"title\": \"expirable access\", \"description\": \"Does not grant access after Sep 2020\", \"expression\": \"request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\", } } ], \"etag\": \"BwWWja0YfJA=\", \"version\": 3 } **YAML example:** bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time \u003c timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3 For a description of IAM and its features, see the [IAM documentation](https://cloud.google.com/iam/docs/).", + "description": "An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources. A `Policy` is a collection of `bindings`. A `binding` binds one or more `members`, or principals, to a single `role`. Principals can be user accounts, service accounts, Google groups, and domains (such as G Suite). A `role` is a named list of permissions; each `role` can be an IAM predefined role or a user-created custom role. For some types of Google Cloud resources, a `binding` can also specify a `condition`, which is a logical expression that allows access to a resource only if the expression evaluates to `true`. A condition can add constraints based on attributes of the request, the resource, or both. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). 
**JSON example:** ``` { \"bindings\": [ { \"role\": \"roles/resourcemanager.organizationAdmin\", \"members\": [ \"user:mike@example.com\", \"group:admins@example.com\", \"domain:google.com\", \"serviceAccount:my-project-id@appspot.gserviceaccount.com\" ] }, { \"role\": \"roles/resourcemanager.organizationViewer\", \"members\": [ \"user:eve@example.com\" ], \"condition\": { \"title\": \"expirable access\", \"description\": \"Does not grant access after Sep 2020\", \"expression\": \"request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\", } } ], \"etag\": \"BwWWja0YfJA=\", \"version\": 3 } ``` **YAML example:** ``` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time \u003c timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3 ``` For a description of IAM and its features, see the [IAM documentation](https://cloud.google.com/iam/docs/).", "id": "Policy", "properties": { "auditConfigs": { diff --git a/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-gen.go b/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-gen.go index a94af0185f35b..5348835a1902c 100644 --- a/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-gen.go +++ b/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-gen.go @@ -8,6 +8,17 @@ // // For product documentation, see: https://cloud.google.com/resource-manager // +// # Library status +// +// These client libraries are officially supported by Google. However, this +// library is considered complete and is in maintenance mode. This means +// that we will address critical bugs and security issues but will not add +// any new features. +// +// When possible, we recommend using our newer +// [Cloud Client Libraries for Go](https://pkg.go.dev/cloud.google.com/go) +// that are still actively being worked and iterated on. +// // # Creating a client // // Usage example: @@ -17,28 +28,31 @@ // ctx := context.Background() // cloudresourcemanagerService, err := cloudresourcemanager.NewService(ctx) // -// In this example, Google Application Default Credentials are used for authentication. -// -// For information on how to create and obtain Application Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials. +// In this example, Google Application Default Credentials are used for +// authentication. For information on how to create and obtain Application +// Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials. // // # Other authentication options // -// By default, all available scopes (see "Constants") are used to authenticate. To restrict scopes, use option.WithScopes: +// By default, all available scopes (see "Constants") are used to authenticate. 
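The updated package comment above documents client construction with Application Default Credentials and scope restriction via option.WithScopes. A minimal sketch of that flow, assuming ADC is configured and using a placeholder project ID ("my-project-id"):

package main

import (
	"context"
	"fmt"
	"log"

	"google.golang.org/api/cloudresourcemanager/v1"
	"google.golang.org/api/option"
)

func main() {
	ctx := context.Background()

	// Application Default Credentials, restricted to the read-only scope,
	// as the doc comment describes via option.WithScopes.
	svc, err := cloudresourcemanager.NewService(ctx,
		option.WithScopes(cloudresourcemanager.CloudPlatformReadOnlyScope))
	if err != nil {
		log.Fatal(err)
	}

	// "my-project-id" is a placeholder project ID.
	project, err := svc.Projects.Get("my-project-id").Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(project.Name, project.LifecycleState)
}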
+// To restrict scopes, use [google.golang.org/api/option.WithScopes]: // // cloudresourcemanagerService, err := cloudresourcemanager.NewService(ctx, option.WithScopes(cloudresourcemanager.CloudPlatformReadOnlyScope)) // -// To use an API key for authentication (note: some APIs do not support API keys), use option.WithAPIKey: +// To use an API key for authentication (note: some APIs do not support API +// keys), use [google.golang.org/api/option.WithAPIKey]: // // cloudresourcemanagerService, err := cloudresourcemanager.NewService(ctx, option.WithAPIKey("AIza...")) // -// To use an OAuth token (e.g., a user token obtained via a three-legged OAuth flow), use option.WithTokenSource: +// To use an OAuth token (e.g., a user token obtained via a three-legged OAuth +// flow, use [google.golang.org/api/option.WithTokenSource]: // // config := &oauth2.Config{...} // // ... // token, err := config.Exchange(ctx, ...) // cloudresourcemanagerService, err := cloudresourcemanager.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token))) // -// See https://godoc.org/google.golang.org/api/option/ for details on options. +// See [google.golang.org/api/option.ClientOption] for details on options. package cloudresourcemanager // import "google.golang.org/api/cloudresourcemanager/v1" import ( @@ -1649,8 +1663,8 @@ type Operation struct { // `operations/{unique_id}`. Name string `json:"name,omitempty"` - // Response: The normal response of the operation in case of success. If - // the original method returns no data on success, such as `Delete`, the + // Response: The normal, successful response of the operation. If the + // original method returns no data on success, such as `Delete`, the // response is `google.protobuf.Empty`. If the original method is // standard `Get`/`Create`/`Update`, the response should be the // resource. For other methods, the response should have the type @@ -1764,9 +1778,10 @@ type Organization struct { CreationTime string `json:"creationTime,omitempty"` // DisplayName: A human-readable string that refers to the Organization - // in the GCP Console UI. This string is set by the server and cannot be - // changed. The string will be set to the primary domain (for example, - // "google.com") of the G Suite customer that owns the organization. + // in the Google Cloud console. This string is set by the server and + // cannot be changed. The string will be set to the primary domain (for + // example, "google.com") of the G Suite customer that owns the + // organization. DisplayName string `json:"displayName,omitempty"` // LifecycleState: The organization's current lifecycle state. Assigned @@ -1863,7 +1878,7 @@ func (s *OrganizationOwner) MarshalJSON() ([]byte, error) { // both. To learn which resources support conditions in their IAM // policies, see the IAM documentation // (https://cloud.google.com/iam/help/conditions/resource-policies). 
-// **JSON example:** { "bindings": [ { "role": +// **JSON example:** ``` { "bindings": [ { "role": // "roles/resourcemanager.organizationAdmin", "members": [ // "user:mike@example.com", "group:admins@example.com", // "domain:google.com", @@ -1872,17 +1887,17 @@ func (s *OrganizationOwner) MarshalJSON() ([]byte, error) { // "user:eve@example.com" ], "condition": { "title": "expirable access", // "description": "Does not grant access after Sep 2020", "expression": // "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], -// "etag": "BwWWja0YfJA=", "version": 3 } **YAML example:** bindings: - -// members: - user:mike@example.com - group:admins@example.com - -// domain:google.com - +// "etag": "BwWWja0YfJA=", "version": 3 } ``` **YAML example:** ``` +// bindings: - members: - user:mike@example.com - +// group:admins@example.com - domain:google.com - // serviceAccount:my-project-id@appspot.gserviceaccount.com role: // roles/resourcemanager.organizationAdmin - members: - // user:eve@example.com role: roles/resourcemanager.organizationViewer // condition: title: expirable access description: Does not grant access // after Sep 2020 expression: request.time < // timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3 -// For a description of IAM and its features, see the IAM documentation -// (https://cloud.google.com/iam/docs/). +// ``` For a description of IAM and its features, see the IAM +// documentation (https://cloud.google.com/iam/docs/). type Policy struct { // AuditConfigs: Specifies cloud audit logging configuration for this // policy. @@ -5199,6 +5214,7 @@ type OrganizationsSearchCall struct { // in an unspecified order. New Organizations do not necessarily appear // at the end of the results. Search will only return organizations on // which the user has the permission `resourcemanager.organizations.get` +// or has super admin privileges. func (r *OrganizationsService) Search(searchorganizationsrequest *SearchOrganizationsRequest) *OrganizationsSearchCall { c := &OrganizationsSearchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.searchorganizationsrequest = searchorganizationsrequest @@ -5293,7 +5309,7 @@ func (c *OrganizationsSearchCall) Do(opts ...googleapi.CallOption) (*SearchOrgan } return ret, nil // { - // "description": "Searches Organization resources that are visible to the user and satisfy the specified filter. This method returns Organizations in an unspecified order. New Organizations do not necessarily appear at the end of the results. Search will only return organizations on which the user has the permission `resourcemanager.organizations.get`", + // "description": "Searches Organization resources that are visible to the user and satisfy the specified filter. This method returns Organizations in an unspecified order. New Organizations do not necessarily appear at the end of the results. Search will only return organizations on which the user has the permission `resourcemanager.organizations.get` or has super admin privileges.", // "flatPath": "v1/organizations:search", // "httpMethod": "POST", // "id": "cloudresourcemanager.organizations.search", @@ -7531,7 +7547,7 @@ type ProjectsSetIamPolicyCall struct { // role using `setIamPolicy()`. The user must be granted the owner role // using the Cloud Platform Console and must explicitly accept the // invitation. + You can only grant ownership of a project to a member -// by using the GCP Console. Inviting a member will deliver an +// by using the Google Cloud console. 
Inviting a member will deliver an // invitation email that they must accept. An invitation email is not // generated if you are granting a role other than owner, or if both the // member you are inviting and the project are part of your @@ -7648,7 +7664,7 @@ func (c *ProjectsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, er } return ret, nil // { - // "description": "Sets the IAM access control policy for the specified Project. CAUTION: This method will replace the existing policy, and cannot be used to append additional IAM settings. NOTE: Removing service accounts from policies or changing their roles can render services completely inoperable. It is important to understand how the service account is being used before removing or updating its roles. For additional information about `resource` (e.g. my-project-id) structure and identification, see [Resource Names](https://cloud.google.com/apis/design/resource_names). The following constraints apply when using `setIamPolicy()`: + Project does not support `allUsers` and `allAuthenticatedUsers` as `members` in a `Binding` of a `Policy`. + The owner role can be granted to a `user`, `serviceAccount`, or a group that is part of an organization. For example, group@myownpersonaldomain.com could be added as an owner to a project in the myownpersonaldomain.com organization, but not the examplepetstore.com organization. + Service accounts can be made owners of a project directly without any restrictions. However, to be added as an owner, a user must be invited via Cloud Platform console and must accept the invitation. + A user cannot be granted the owner role using `setIamPolicy()`. The user must be granted the owner role using the Cloud Platform Console and must explicitly accept the invitation. + You can only grant ownership of a project to a member by using the GCP Console. Inviting a member will deliver an invitation email that they must accept. An invitation email is not generated if you are granting a role other than owner, or if both the member you are inviting and the project are part of your organization. + If the project is not part of an organization, there must be at least one owner who has accepted the Terms of Service (ToS) agreement in the policy. Calling `setIamPolicy()` to remove the last ToS-accepted owner from the policy will fail. This restriction also applies to legacy projects that no longer have owners who have accepted the ToS. Edits to IAM policies will be rejected until the lack of a ToS-accepting owner is rectified. If the project is part of an organization, you can remove all owners, potentially making the organization inaccessible. Authorization requires the Google IAM permission `resourcemanager.projects.setIamPolicy` on the project", + // "description": "Sets the IAM access control policy for the specified Project. CAUTION: This method will replace the existing policy, and cannot be used to append additional IAM settings. NOTE: Removing service accounts from policies or changing their roles can render services completely inoperable. It is important to understand how the service account is being used before removing or updating its roles. For additional information about `resource` (e.g. my-project-id) structure and identification, see [Resource Names](https://cloud.google.com/apis/design/resource_names). The following constraints apply when using `setIamPolicy()`: + Project does not support `allUsers` and `allAuthenticatedUsers` as `members` in a `Binding` of a `Policy`. 
+ The owner role can be granted to a `user`, `serviceAccount`, or a group that is part of an organization. For example, group@myownpersonaldomain.com could be added as an owner to a project in the myownpersonaldomain.com organization, but not the examplepetstore.com organization. + Service accounts can be made owners of a project directly without any restrictions. However, to be added as an owner, a user must be invited via Cloud Platform console and must accept the invitation. + A user cannot be granted the owner role using `setIamPolicy()`. The user must be granted the owner role using the Cloud Platform Console and must explicitly accept the invitation. + You can only grant ownership of a project to a member by using the Google Cloud console. Inviting a member will deliver an invitation email that they must accept. An invitation email is not generated if you are granting a role other than owner, or if both the member you are inviting and the project are part of your organization. + If the project is not part of an organization, there must be at least one owner who has accepted the Terms of Service (ToS) agreement in the policy. Calling `setIamPolicy()` to remove the last ToS-accepted owner from the policy will fail. This restriction also applies to legacy projects that no longer have owners who have accepted the ToS. Edits to IAM policies will be rejected until the lack of a ToS-accepting owner is rectified. If the project is part of an organization, you can remove all owners, potentially making the organization inaccessible. Authorization requires the Google IAM permission `resourcemanager.projects.setIamPolicy` on the project", // "flatPath": "v1/projects/{resource}:setIamPolicy", // "httpMethod": "POST", // "id": "cloudresourcemanager.projects.setIamPolicy", diff --git a/vendor/google.golang.org/api/compute/v1/compute-api.json b/vendor/google.golang.org/api/compute/v1/compute-api.json index 8b77f64d4256c..4913c0f596d53 100644 --- a/vendor/google.golang.org/api/compute/v1/compute-api.json +++ b/vendor/google.golang.org/api/compute/v1/compute-api.json @@ -135,7 +135,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -173,6 +173,11 @@ "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", "location": "query", "type": "boolean" + }, + "serviceProjectNumber": { + "format": "int64", + "location": "query", + "type": "string" } }, "path": "projects/{project}/aggregated/acceleratorTypes", @@ -239,7 +244,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -305,7 +310,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -343,6 +348,11 @@ "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" + }, + "serviceProjectNumber": { + "format": "int64", + "location": "query", + "type": "string" } }, "path": "projects/{project}/aggregated/addresses", @@ -498,7 +508,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. 
If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -664,7 +674,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. 
If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -702,6 +712,11 @@ "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" + }, + "serviceProjectNumber": { + "format": "int64", + "location": "query", + "type": "string" } }, "path": "projects/{project}/aggregated/autoscalers", @@ -857,7 +872,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. 
You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -1169,6 +1184,47 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "getIamPolicy": { + "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + "flatPath": "projects/{project}/global/backendBuckets/{resource}/getIamPolicy", + "httpMethod": "GET", + "id": "compute.backendBuckets.getIamPolicy", + "parameterOrder": [ + "project", + "resource" + ], + "parameters": { + "optionsRequestedPolicyVersion": { + "description": "Requested IAM Policy version.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/global/backendBuckets/{resource}/getIamPolicy", + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, "insert": { "description": "Creates a BackendBucket resource in the specified project using the data included in the request.", "flatPath": "projects/{project}/global/backendBuckets", @@ -1213,7 +1269,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -1341,6 +1397,81 @@ "https://www.googleapis.com/auth/compute" ] }, + "setIamPolicy": { + "description": "Sets the access control policy on the specified resource. 
Replaces any existing policy.", + "flatPath": "projects/{project}/global/backendBuckets/{resource}/setIamPolicy", + "httpMethod": "POST", + "id": "compute.backendBuckets.setIamPolicy", + "parameterOrder": [ + "project", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/global/backendBuckets/{resource}/setIamPolicy", + "request": { + "$ref": "GlobalSetPolicyRequest" + }, + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource.", + "flatPath": "projects/{project}/global/backendBuckets/{resource}/testIamPermissions", + "httpMethod": "POST", + "id": "compute.backendBuckets.testIamPermissions", + "parameterOrder": [ + "project", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/global/backendBuckets/{resource}/testIamPermissions", + "request": { + "$ref": "TestPermissionsRequest" + }, + "response": { + "$ref": "TestPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, "update": { "description": "Updates the specified BackendBucket resource with the data included in the request.", "flatPath": "projects/{project}/global/backendBuckets/{backendBucket}", @@ -1438,7 +1569,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. 
You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -1476,6 +1607,11 @@ "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" + }, + "serviceProjectNumber": { + "format": "int64", + "location": "query", + "type": "string" } }, "path": "projects/{project}/aggregated/backendServices", @@ -1729,7 +1865,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. 
For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -1774,6 +1910,61 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "listUsable": { + "description": "Retrieves an aggregated list of all usable backend services in the specified project.", + "flatPath": "projects/{project}/global/backendServices/listUsable", + "httpMethod": "GET", + "id": "compute.backendServices.listUsable", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. 
However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "location": "query", + "type": "boolean" + } + }, + "path": "projects/{project}/global/backendServices/listUsable", + "response": { + "$ref": "BackendServiceListUsable" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, "patch": { "description": "Patches the specified BackendService resource with the data included in the request. For more information, see Backend services overview. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", "flatPath": "projects/{project}/global/backendServices/{backendService}", @@ -1935,6 +2126,44 @@ "https://www.googleapis.com/auth/compute" ] }, + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource.", + "flatPath": "projects/{project}/global/backendServices/{resource}/testIamPermissions", + "httpMethod": "POST", + "id": "compute.backendServices.testIamPermissions", + "parameterOrder": [ + "project", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/global/backendServices/{resource}/testIamPermissions", + "request": { + "$ref": "TestPermissionsRequest" + }, + "response": { + "$ref": "TestPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, "update": { "description": "Updates the specified BackendService resource with the data included in the request. For more information, see Backend services overview.", "flatPath": "projects/{project}/global/backendServices/{backendService}", @@ -1991,7 +2220,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -2029,6 +2258,11 @@ "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", "location": "query", "type": "boolean" + }, + "serviceProjectNumber": { + "format": "int64", + "location": "query", + "type": "string" } }, "path": "projects/{project}/aggregated/diskTypes", @@ -2095,7 +2329,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -2211,7 +2445,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -2249,6 +2483,11 @@ "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" + }, + "serviceProjectNumber": { + "format": "int64", + "location": "query", + "type": "string" } }, "path": "projects/{project}/aggregated/disks", @@ -2554,7 +2793,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. 
If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -3169,7 +3408,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. 
If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -3594,7 +3833,7 @@ "id": "compute.firewallPolicies.list", "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -4023,7 +4262,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. 
Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -4166,7 +4405,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. 
Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -4204,6 +4443,11 @@ "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" + }, + "serviceProjectNumber": { + "format": "int64", + "location": "query", + "type": "string" } }, "path": "projects/{project}/aggregated/forwardingRules", @@ -4359,7 +4603,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. 
For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
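[Editor's note, not part of the patch] For the `eq`/`ne` regular-expression form described in these descriptions, the literal uses RE2 syntax and must match the entire field. Go's `regexp` package is also RE2, so a pattern can be sanity-checked locally before sending the request; the snippet below is only an illustrative sketch with made-up example names, not anything from the generated client:

```
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// `name ne .*instance` excludes names where `.*instance` matches the
	// ENTIRE name, so anchor the literal when checking it locally.
	literal := ".*instance"
	re := regexp.MustCompile("^(?:" + literal + ")$")

	for _, name := range []string{"example-instance", "instance-backup"} {
		fmt.Printf("%-18s matches entire field: %v\n", name, re.MatchString(name))
	}
	// example-instance   -> true  (would be filtered out by `ne`)
	// instance-backup    -> false (would be kept)
}
```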
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -4683,7 +4927,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -4929,7 +5173,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -5297,7 +5541,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. 
The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -5353,7 +5597,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. 
For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -5418,7 +5662,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -5456,6 +5700,11 @@ "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", "location": "query", "type": "boolean" + }, + "serviceProjectNumber": { + "format": "int64", + "location": "query", + "type": "string" } }, "path": "projects/{project}/aggregated/operations", @@ -5544,7 +5793,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
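[Editor's note, not part of the patch] The `+` hunks in this area add a `serviceProjectNumber` query parameter (an int64 carried as a string) alongside the existing `returnPartialSuccess` opt-in on aggregated list methods such as `projects/{project}/aggregated/operations`. Until the Go client is regenerated from this discovery revision, the parameter can be pictured as just another entry in the request's query string; the sketch below only shows how these parameters combine, with the base URL, project, and all values as assumed placeholders:

```
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Placeholder project; the path comes from this hunk of the discovery doc:
	// projects/{project}/aggregated/operations
	base := "https://compute.googleapis.com/compute/v1/projects/my-project/aggregated/operations"

	q := url.Values{}
	q.Set("filter", `status = "RUNNING"`)      // AIP-160 style filter (placeholder expression)
	q.Set("returnPartialSuccess", "true")      // opt in to partial results on failure
	q.Set("serviceProjectNumber", "123456789") // query parameter newly added by this hunk

	fmt.Println(base + "?" + q.Encode())
}
```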
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -5695,7 +5944,7 @@ "id": "compute.globalOrganizationOperations.list", "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -5860,7 +6109,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. 
The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -5961,7 +6210,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. 
For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -5999,6 +6248,11 @@ "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" + }, + "serviceProjectNumber": { + "format": "int64", + "location": "query", + "type": "string" } }, "path": "projects/{project}/aggregated/healthChecks", @@ -6129,7 +6383,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -6380,7 +6634,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. 
Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -6631,7 +6885,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. 
Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -7055,7 +7309,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. 
To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -7316,7 +7570,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. 
Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -7354,6 +7608,11 @@ "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" + }, + "serviceProjectNumber": { + "format": "int64", + "location": "query", + "type": "string" } }, "path": "projects/{project}/aggregated/instanceGroupManagers", @@ -7686,7 +7945,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -7749,7 +8008,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. 
The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -7818,7 +8077,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. 
For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -7887,7 +8146,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -8347,7 +8606,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. 
If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -8385,6 +8644,11 @@ "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" + }, + "serviceProjectNumber": { + "format": "int64", + "location": "query", + "type": "string" } }, "path": "projects/{project}/aggregated/instanceGroups", @@ -8535,7 +8799,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -8598,7 +8862,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. 
You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -8768,7 +9032,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -8806,6 +9070,11 @@ "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" + }, + "serviceProjectNumber": { + "format": "int64", + "location": "query", + "type": "string" } }, "path": "projects/{project}/aggregated/instanceTemplates", @@ -8977,7 +9246,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -9218,7 +9487,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. 
The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -9256,6 +9525,11 @@ "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" + }, + "serviceProjectNumber": { + "format": "int64", + "location": "query", + "type": "string" } }, "path": "projects/{project}/aggregated/instances", @@ -9929,7 +10203,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. 
These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -9993,7 +10267,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -10745,11 +11019,11 @@ "https://www.googleapis.com/auth/compute" ] }, - "setServiceAccount": { - "description": "Sets the service account on the instance. 
For more information, read Changing the service account and access scopes for an instance.", - "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setServiceAccount", + "setSecurityPolicy": { + "description": "Sets the Google Cloud Armor security policy for the specified instance. For more information, see Google Cloud Armor Overview", + "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setSecurityPolicy", "httpMethod": "POST", - "id": "compute.instances.setServiceAccount", + "id": "compute.instances.setSecurityPolicy", "parameterOrder": [ "project", "zone", @@ -10757,9 +11031,8 @@ ], "parameters": { "instance": { - "description": "Name of the instance resource to start.", + "description": "Name of the Instance resource to which the security policy should be set. The name should conform to RFC1035.", "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -10776,16 +11049,16 @@ "type": "string" }, "zone": { - "description": "The name of the zone for this request.", + "description": "Name of the zone scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" } }, - "path": "projects/{project}/zones/{zone}/instances/{instance}/setServiceAccount", + "path": "projects/{project}/zones/{zone}/instances/{instance}/setSecurityPolicy", "request": { - "$ref": "InstancesSetServiceAccountRequest" + "$ref": "InstancesSetSecurityPolicyRequest" }, "response": { "$ref": "Operation" @@ -10795,11 +11068,11 @@ "https://www.googleapis.com/auth/compute" ] }, - "setShieldedInstanceIntegrityPolicy": { - "description": "Sets the Shielded Instance integrity policy for an instance. You can only use this method on a running instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy", - "httpMethod": "PATCH", - "id": "compute.instances.setShieldedInstanceIntegrityPolicy", + "setServiceAccount": { + "description": "Sets the service account on the instance. For more information, read Changing the service account and access scopes for an instance.", + "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setServiceAccount", + "httpMethod": "POST", + "id": "compute.instances.setServiceAccount", "parameterOrder": [ "project", "zone", @@ -10807,7 +11080,57 @@ ], "parameters": { "instance": { - "description": "Name or id of the instance scoping this request.", + "description": "Name of the instance resource to start.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/zones/{zone}/instances/{instance}/setServiceAccount", + "request": { + "$ref": "InstancesSetServiceAccountRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setShieldedInstanceIntegrityPolicy": { + "description": "Sets the Shielded Instance integrity policy for an instance. You can only use this method on a running instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy", + "httpMethod": "PATCH", + "id": "compute.instances.setShieldedInstanceIntegrityPolicy", + "parameterOrder": [ + "project", + "zone", + "instance" + ], + "parameters": { + "instance": { + "description": "Name or id of the instance scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, @@ -11501,7 +11824,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -11539,6 +11862,11 @@ "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", "location": "query", "type": "boolean" + }, + "serviceProjectNumber": { + "format": "int64", + "location": "query", + "type": "string" } }, "path": "projects/{project}/aggregated/interconnectAttachments", @@ -11699,7 +12027,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -11900,7 +12228,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -11994,7 +12322,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. 
The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -12118,7 +12446,7 @@ ] }, "getDiagnostics": { - "description": "Returns the interconnectDiagnostics for the specified Interconnect.", + "description": "Returns the interconnectDiagnostics for the specified Interconnect. In the event of a global outage, do not use this API to make decisions about where to redirect your network traffic. Unlike a VLAN attachment, which is regional, a Cloud Interconnect connection is a global resource. A global outage can prevent this API from functioning properly.", "flatPath": "projects/{project}/global/interconnects/{interconnect}/getDiagnostics", "httpMethod": "GET", "id": "compute.interconnects.getDiagnostics", @@ -12152,6 +12480,41 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "getMacsecConfig": { + "description": "Returns the interconnectMacsecConfig for the specified Interconnect.", + "flatPath": "projects/{project}/global/interconnects/{interconnect}/getMacsecConfig", + "httpMethod": "GET", + "id": "compute.interconnects.getMacsecConfig", + "parameterOrder": [ + "project", + "interconnect" + ], + "parameters": { + "interconnect": { + "description": "Name of the interconnect resource to query.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/global/interconnects/{interconnect}/getMacsecConfig", + "response": { + "$ref": "InterconnectsGetMacsecConfigResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, "insert": { "description": "Creates an Interconnect in the specified project using the data included in the request.", "flatPath": "projects/{project}/global/interconnects", @@ -12196,7 +12559,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. 
The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. 
The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -12563,7 +12926,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. 
For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -12851,7 +13214,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -12985,7 +13348,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. 
The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -13023,6 +13386,11 @@ "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" + }, + "serviceProjectNumber": { + "format": "int64", + "location": "query", + "type": "string" } }, "path": "projects/{project}/aggregated/machineTypes", @@ -13089,7 +13457,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. 
These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -13155,7 +13523,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -13193,6 +13561,11 @@ "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", "location": "query", "type": "boolean" + }, + "serviceProjectNumber": { + "format": "int64", + "location": "query", + "type": "string" } }, "path": "projects/{project}/aggregated/networkAttachments", @@ -13397,7 +13770,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -13449,6 +13822,56 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "patch": { + "description": "Patches the specified NetworkAttachment resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules.", + "flatPath": "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}", + "httpMethod": "PATCH", + "id": "compute.networkAttachments.patch", + "parameterOrder": [ + "project", + "region", + "networkAttachment" + ], + "parameters": { + "networkAttachment": { + "description": "Name of the NetworkAttachment resource to patch.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000). 
end_interface: MixerMutationRequestBuilder", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}", + "request": { + "$ref": "NetworkAttachment" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "setIamPolicy": { "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", "flatPath": "projects/{project}/regions/{region}/networkAttachments/{resource}/setIamPolicy", @@ -13554,7 +13977,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -13592,6 +14015,11 @@ "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" + }, + "serviceProjectNumber": { + "format": "int64", + "location": "query", + "type": "string" } }, "path": "projects/{project}/aggregated/networkEdgeSecurityServices", @@ -13816,7 +14244,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. 
To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -13854,6 +14282,11 @@ "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", "location": "query", "type": "boolean" + }, + "serviceProjectNumber": { + "format": "int64", + "location": "query", + "type": "string" } }, "path": "projects/{project}/aggregated/networkEndpointGroups", @@ -14100,7 +14533,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -14163,7 +14596,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -14658,7 +15091,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. 
The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -15156,7 +15589,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. 
For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -15225,7 +15658,7 @@ "type": "string" }, "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -15516,7 +15949,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. 
If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -15554,6 +15987,11 @@ "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" + }, + "serviceProjectNumber": { + "format": "int64", + "location": "query", + "type": "string" } }, "path": "projects/{project}/aggregated/nodeGroups", @@ -15816,7 +16254,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -15880,7 +16318,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. 
You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -16194,7 +16632,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -16232,6 +16670,11 @@ "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" + }, + "serviceProjectNumber": { + "format": "int64", + "location": "query", + "type": "string" } }, "path": "projects/{project}/aggregated/nodeTemplates", @@ -16436,7 +16879,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -16593,7 +17036,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. 
The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -16631,6 +17074,11 @@ "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" + }, + "serviceProjectNumber": { + "format": "int64", + "location": "query", + "type": "string" } }, "path": "projects/{project}/aggregated/nodeTypes", @@ -16697,7 +17145,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. 
These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -16763,7 +17211,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -16801,6 +17249,11 @@ "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", "location": "query", "type": "boolean" + }, + "serviceProjectNumber": { + "format": "int64", + "location": "query", + "type": "string" } }, "path": "projects/{project}/aggregated/packetMirrorings", @@ -16956,7 +17409,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -17301,7 +17754,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -17355,7 +17808,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. 
The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -17580,6 +18033,44 @@ }, "publicAdvertisedPrefixes": { "methods": { + "announce": { + "description": "Announces the specified PublicAdvertisedPrefix", + "flatPath": "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}/announce", + "httpMethod": "POST", + "id": "compute.publicAdvertisedPrefixes.announce", + "parameterOrder": [ + "project", + "publicAdvertisedPrefix" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "publicAdvertisedPrefix": { + "description": "The name of the public advertised prefix. It should comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}/announce", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "delete": { "description": "Deletes the specified PublicAdvertisedPrefix", "flatPath": "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}", @@ -17698,7 +18189,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. 
For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -17784,6 +18275,44 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] + }, + "withdraw": { + "description": "Withdraws the specified PublicAdvertisedPrefix", + "flatPath": "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}/withdraw", + "httpMethod": "POST", + "id": "compute.publicAdvertisedPrefixes.withdraw", + "parameterOrder": [ + "project", + "publicAdvertisedPrefix" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "publicAdvertisedPrefix": { + "description": "The name of the public advertised prefix. It should comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}/withdraw", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] } } }, @@ -17799,7 +18328,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. 
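The hunks above register new announce and withdraw POST methods for PublicAdvertisedPrefix resources, both returning an Operation and requiring the compute scope. A minimal sketch of driving the announce endpoint over plain REST, assuming the compute v1 base URL and Application Default Credentials via golang.org/x/oauth2/google; the project and prefix names are placeholders:

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
)

func main() {
	ctx := context.Background()
	// Application Default Credentials with the compute scope listed in the method definition.
	client, err := google.DefaultClient(ctx, "https://www.googleapis.com/auth/compute")
	if err != nil {
		log.Fatal(err)
	}

	// Path template copied from the discovery hunk; the base URL is assumed to be compute v1.
	url := "https://compute.googleapis.com/compute/v1/projects/my-project/global/publicAdvertisedPrefixes/my-prefix/announce" // placeholders
	resp, err := client.Post(url, "application/json", nil) // the method defines no request body
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // on success the body is an Operation resource
	// The withdraw variant is identical except the final path segment is "withdraw".
}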
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -17837,6 +18366,11 @@ "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", "location": "query", "type": "boolean" + }, + "serviceProjectNumber": { + "format": "int64", + "location": "query", + "type": "string" } }, "path": "projects/{project}/aggregated/publicDelegatedPrefixes", @@ -17849,6 +18383,51 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "announce": { + "description": "Announces the specified PublicDelegatedPrefix in the given region.", + "flatPath": "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}/announce", + "httpMethod": "POST", + "id": "compute.publicDelegatedPrefixes.announce", + "parameterOrder": [ + "project", + "region", + "publicDelegatedPrefix" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "publicDelegatedPrefix": { + "description": "The name of the public delegated prefix. It should comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region where the public delegated prefix is located. It should comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}/announce", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "delete": { "description": "Deletes the specified PublicDelegatedPrefix in the given region.", "flatPath": "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}", @@ -17992,7 +18571,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. 
For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -18093,6 +18672,51 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] + }, + "withdraw": { + "description": "Withdraws the specified PublicDelegatedPrefix in the given region.", + "flatPath": "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}/withdraw", + "httpMethod": "POST", + "id": "compute.publicDelegatedPrefixes.withdraw", + "parameterOrder": [ + "project", + "region", + "publicDelegatedPrefix" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "publicDelegatedPrefix": { + "description": "The name of the public delegated prefix. It should comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region where the public delegated prefix is located. It should comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}/withdraw", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] } } }, @@ -18241,7 +18865,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. 
You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -18630,7 +19254,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. 
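The updated filter descriptions stress that AIP-160 expressions and the legacy eq/ne regular-expression form cannot be mixed in a single request. A small sketch of attaching either style to a list call through the filter query parameter; the list path is a placeholder and the expressions are taken from the examples in the description:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// AIP-160 style: field, operator, value; parenthesized expressions AND together by default.
	aip160 := `(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`

	// Legacy regular-expression style: eq/ne with an RE2 pattern that must match the entire field.
	legacy := `name ne .*instance`

	// Either style goes into the same "filter" query parameter, never both in one request.
	for _, filter := range []string{aip160, legacy} {
		q := url.Values{}
		q.Set("filter", filter)
		q.Set("maxResults", "500")
		fmt.Println("projects/my-project/aggregated/publicDelegatedPrefixes?" + q.Encode()) // placeholder list path
	}
}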
You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -18682,6 +19306,68 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "listUsable": { + "description": "Retrieves an aggregated list of all usable backend services in the specified project in the given region.", + "flatPath": "projects/{project}/regions/{region}/backendServices/listUsable", + "httpMethod": "GET", + "id": "compute.regionBackendServices.listUsable", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request. It must be a string that meets the requirements in RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "location": "query", + "type": "boolean" + } + }, + "path": "projects/{project}/regions/{region}/backendServices/listUsable", + "response": { + "$ref": "BackendServiceListUsable" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, "patch": { "description": "Updates the specified regional BackendService resource with the data included in the request. For more information, see Understanding backend services This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", "flatPath": "projects/{project}/regions/{region}/backendServices/{backendService}", @@ -18777,6 +19463,101 @@ "https://www.googleapis.com/auth/compute" ] }, + "setSecurityPolicy": { + "description": "Sets the Google Cloud Armor security policy for the specified backend service. 
For more information, see Google Cloud Armor Overview", + "flatPath": "projects/{project}/regions/{region}/backendServices/{backendService}/setSecurityPolicy", + "httpMethod": "POST", + "id": "compute.regionBackendServices.setSecurityPolicy", + "parameterOrder": [ + "project", + "region", + "backendService" + ], + "parameters": { + "backendService": { + "description": "Name of the BackendService resource to which the security policy should be set. The name should conform to RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/backendServices/{backendService}/setSecurityPolicy", + "request": { + "$ref": "SecurityPolicyReference" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "testIamPermissions": { + "description": "Returns permissions that a caller has on the specified resource.", + "flatPath": "projects/{project}/regions/{region}/backendServices/{resource}/testIamPermissions", + "httpMethod": "POST", + "id": "compute.regionBackendServices.testIamPermissions", + "parameterOrder": [ + "project", + "region", + "resource" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "resource": { + "description": "Name or id of the resource for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/backendServices/{resource}/testIamPermissions", + "request": { + "$ref": "TestPermissionsRequest" + }, + "response": { + "$ref": "TestPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, "update": { "description": 
"Updates the specified regional BackendService resource with the data included in the request. For more information, see Backend services overview .", "flatPath": "projects/{project}/regions/{region}/backendServices/{backendService}", @@ -18841,7 +19622,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -18879,6 +19660,11 @@ "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" + }, + "serviceProjectNumber": { + "format": "int64", + "location": "query", + "type": "string" } }, "path": "projects/{project}/aggregated/commitments", @@ -18987,7 +19773,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -19158,7 +19944,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. 
If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -19552,7 +20338,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. 
If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -20190,7 +20976,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. 
However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -20438,7 +21224,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. 
If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -20962,7 +21748,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. 
If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -21025,7 +21811,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. 
However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -21094,7 +21880,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. 
If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -21163,7 +21949,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. 
If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -21618,7 +22404,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. 
However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -21681,7 +22467,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. 
If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -21936,7 +22722,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. 
If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -22038,6 +22824,54 @@ }, "regionNetworkEndpointGroups": { "methods": { + "attachNetworkEndpoints": { + "description": "Attach a list of network endpoints to the specified network endpoint group.", + "flatPath": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}/attachNetworkEndpoints", + "httpMethod": "POST", + "id": "compute.regionNetworkEndpointGroups.attachNetworkEndpoints", + "parameterOrder": [ + "project", + "region", + "networkEndpointGroup" + ], + "parameters": { + "networkEndpointGroup": { + "description": "The name of the network endpoint group where you are attaching network endpoints to. It should comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region where you want to create the network endpoint group. It should comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}/attachNetworkEndpoints", + "request": { + "$ref": "RegionNetworkEndpointGroupsAttachEndpointsRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "delete": { "description": "Deletes the specified network endpoint group. Note that the NEG cannot be deleted if it is configured as a backend of a backend service.", "flatPath": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", @@ -22083,6 +22917,54 @@ "https://www.googleapis.com/auth/compute" ] }, + "detachNetworkEndpoints": { + "description": "Detach the network endpoint from the specified network endpoint group.", + "flatPath": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}/detachNetworkEndpoints", + "httpMethod": "POST", + "id": "compute.regionNetworkEndpointGroups.detachNetworkEndpoints", + "parameterOrder": [ + "project", + "region", + "networkEndpointGroup" + ], + "parameters": { + "networkEndpointGroup": { + "description": "The name of the network endpoint group you are detaching network endpoints from. It should comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region where the network endpoint group is located. It should comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000). 
end_interface: MixerMutationRequestBuilder", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}/detachNetworkEndpoints", + "request": { + "$ref": "RegionNetworkEndpointGroupsDetachEndpointsRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "get": { "description": "Returns the specified network endpoint group.", "flatPath": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", @@ -22176,7 +23058,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -22226,6 +23108,75 @@ "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] + }, + "listNetworkEndpoints": { + "description": "Lists the network endpoints in the specified network endpoint group.", + "flatPath": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints", + "httpMethod": "POST", + "id": "compute.regionNetworkEndpointGroups.listNetworkEndpoints", + "parameterOrder": [ + "project", + "region", + "networkEndpointGroup" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "networkEndpointGroup": { + "description": "The name of the network endpoint group from which you want to generate a list of included network endpoints. It should comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region where the network endpoint group is located. It should comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", + "location": "query", + "type": "boolean" + } + }, + "path": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints", + "response": { + "$ref": "NetworkEndpointGroupsListNetworkEndpoints" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] } } }, @@ -22731,7 +23682,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. 
The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -23232,7 +24183,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -23381,7 +24332,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. 
The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -23480,6 +24431,56 @@ }, "regionSecurityPolicies": { "methods": { + "addRule": { + "description": "Inserts a rule into a security policy.", + "flatPath": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}/addRule", + "httpMethod": "POST", + "id": "compute.regionSecurityPolicies.addRule", + "parameterOrder": [ + "project", + "region", + "securityPolicy" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "securityPolicy": { + "description": "Name of the security policy to update.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "validateOnly": { + "description": "If true, the request will not be committed.", + "location": "query", + "type": "boolean" + } + }, + "path": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}/addRule", + "request": { + "$ref": "SecurityPolicyRule" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "delete": { "description": "Deletes the specified policy.", "flatPath": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}", @@ -23570,6 +24571,55 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "getRule": { + "description": "Gets a rule at the specified priority.", + "flatPath": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}/getRule", + "httpMethod": "GET", + "id": "compute.regionSecurityPolicies.getRule", + "parameterOrder": [ + "project", + "region", + "securityPolicy" + ], + "parameters": { + "priority": { + "description": "The priority of the rule to get from the security policy.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "securityPolicy": { + "description": "Name of the security policy to which the queried rule belongs.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": 
"projects/{project}/regions/{region}/securityPolicies/{securityPolicy}/getRule", + "response": { + "$ref": "SecurityPolicyRule" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, "insert": { "description": "Creates a new policy in the specified project using the data included in the request.", "flatPath": "projects/{project}/regions/{region}/securityPolicies", @@ -23628,7 +24678,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. 
For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -23716,6 +24766,12 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" + }, + "updateMask": { + "description": "Indicates fields to be cleared as part of this request.", + "format": "google-fieldmask", + "location": "query", + "type": "string" } }, "path": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}", @@ -23729,6 +24785,116 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] + }, + "patchRule": { + "description": "Patches a rule at the specified priority. 
To clear fields in the rule, leave the fields empty and specify them in the updateMask.", + "flatPath": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}/patchRule", + "httpMethod": "POST", + "id": "compute.regionSecurityPolicies.patchRule", + "parameterOrder": [ + "project", + "region", + "securityPolicy" + ], + "parameters": { + "priority": { + "description": "The priority of the rule to patch.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "securityPolicy": { + "description": "Name of the security policy to update.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "updateMask": { + "description": "Indicates fields to be cleared as part of this request.", + "format": "google-fieldmask", + "location": "query", + "type": "string" + }, + "validateOnly": { + "description": "If true, the request will not be committed.", + "location": "query", + "type": "boolean" + } + }, + "path": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}/patchRule", + "request": { + "$ref": "SecurityPolicyRule" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "removeRule": { + "description": "Deletes a rule at the specified priority.", + "flatPath": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}/removeRule", + "httpMethod": "POST", + "id": "compute.regionSecurityPolicies.removeRule", + "parameterOrder": [ + "project", + "region", + "securityPolicy" + ], + "parameters": { + "priority": { + "description": "The priority of the rule to remove from the security policy.", + "format": "int32", + "location": "query", + "type": "integer" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "securityPolicy": { + "description": "Name of the security policy to update.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}/removeRule", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] } } }, @@ -23877,7 +25043,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. 
If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -24074,7 +25240,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. 
If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -24137,7 +25303,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. 
However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -24385,7 +25551,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. 
If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -24634,7 +25800,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. 
If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -24983,7 +26149,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. 
However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -25182,7 +26348,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. 
If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -25428,7 +26594,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. 
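The `filter` query parameter described in these hunks is passed through as-is by the generated Go client via the `Filter()` option on list calls. A minimal sketch of how a caller would use it, assuming the `google.golang.org/api/compute/v1` client generated from this discovery document; the project and zone names are placeholders:

```go
package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	// NewService picks up Application Default Credentials.
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// AIP-160 style filter: parenthesized expressions are ANDed by default.
	// The legacy regex style would instead look like .Filter("name ne .*instance").
	call := svc.Instances.List("my-project", "us-central1-a").
		Filter(`(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`)

	if err := call.Pages(ctx, func(page *compute.InstanceList) error {
		for _, inst := range page.Items {
			fmt.Println(inst.Name)
		}
		return nil
	}); err != nil {
		log.Fatal(err)
	}
}
```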
If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -25487,7 +26653,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. 
However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -25525,6 +26691,11 @@ "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", "location": "query", "type": "boolean" + }, + "serviceProjectNumber": { + "format": "int64", + "location": "query", + "type": "string" } }, "path": "projects/{project}/aggregated/reservations", @@ -25729,7 +26900,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -25997,7 +27168,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -26035,6 +27206,11 @@ "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" + }, + "serviceProjectNumber": { + "format": "int64", + "location": "query", + "type": "string" } }, "path": "projects/{project}/aggregated/resourcePolicies", @@ -26239,7 +27415,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. 
If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -26452,7 +27628,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. 
If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -26490,6 +27666,11 @@ "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" + }, + "serviceProjectNumber": { + "format": "int64", + "location": "query", + "type": "string" } }, "path": "projects/{project}/aggregated/routers", @@ -26592,6 +27773,53 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "getNatIpInfo": { + "description": "Retrieves runtime NAT IP information.", + "flatPath": "projects/{project}/regions/{region}/routers/{router}/getNatIpInfo", + "httpMethod": "GET", + "id": "compute.routers.getNatIpInfo", + "parameterOrder": [ + "project", + "region", + "router" + ], + "parameters": { + "natName": { + "description": "Name of the nat service to filter the NAT IP information. If it is omitted, all nats for this router will be returned. Name should conform to RFC1035.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "router": { + "description": "Name of the Router resource to query for Nat IP information. 
The name should conform to RFC1035.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/routers/{router}/getNatIpInfo", + "response": { + "$ref": "NatIpInfoResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, "getNatMappingInfo": { "description": "Retrieves runtime Nat mapping information of VM endpoints.", "flatPath": "projects/{project}/regions/{region}/routers/{router}/getNatMappingInfo", @@ -26604,7 +27832,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. 
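The preceding hunk defines the new `compute.routers.getNatIpInfo` method. Once this discovery document is regenerated into the Go client, the call surface should look roughly like the fragment below (method and option names are inferred from the discovery definition and depend on the vendored client version; `svc` and `ctx` are as in the earlier sketch, and the resource names are placeholders):

```go
// Sketch: query runtime NAT IP information for a Cloud Router.
natInfo, err := svc.Routers.
	GetNatIpInfo("my-project", "us-central1", "my-router").
	NatName("my-nat"). // optional; omit to return NAT IP info for every NAT on the router
	Context(ctx).
	Do()
if err != nil {
	log.Fatal(err)
}
fmt.Printf("%+v\n", natInfo) // *compute.NatIpInfoResponse
```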
For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -26764,7 +27992,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -27084,7 +28312,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. 
If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -27185,7 +28413,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. 
If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -27223,6 +28451,11 @@ "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" + }, + "serviceProjectNumber": { + "format": "int64", + "location": "query", + "type": "string" } }, "path": "projects/{project}/aggregated/securityPolicies", @@ -27399,7 +28632,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. 
You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -27454,7 +28687,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -27527,6 +28760,12 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" + }, + "updateMask": { + "description": "Indicates fields to be cleared as part of this request.", + "format": "google-fieldmask", + "location": "query", + "type": "string" } }, "path": "projects/{project}/global/securityPolicies/{securityPolicy}", @@ -27571,6 +28810,12 @@ "required": true, "type": "string" }, + "updateMask": { + "description": "Indicates fields to be cleared as part of this request.", + "format": "google-fieldmask", + "location": "query", + "type": "string" + }, "validateOnly": { "description": "If true, the request will not be committed.", "location": "query", @@ -27680,7 +28925,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -27718,6 +28963,11 @@ "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", "location": "query", "type": "boolean" + }, + "serviceProjectNumber": { + "format": "int64", + "location": "query", + "type": "string" } }, "path": "projects/{project}/aggregated/serviceAttachments", @@ -27922,7 +29172,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -28115,6 +29365,77 @@ } } }, + "snapshotSettings": { + "methods": { + "get": { + "description": "Get snapshot settings.", + "flatPath": "projects/{project}/global/snapshotSettings", + "httpMethod": "GET", + "id": "compute.snapshotSettings.get", + "parameterOrder": [ + "project" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/global/snapshotSettings", + "response": { + "$ref": "SnapshotSettings" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "patch": { + "description": "Patch snapshot settings.", + "flatPath": "projects/{project}/global/snapshotSettings", + "httpMethod": "PATCH", + "id": "compute.snapshotSettings.patch", + "parameterOrder": [ + "project" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "updateMask": { + "description": "update_mask indicates fields to be updated as part of this request.", + "format": "google-fieldmask", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/global/snapshotSettings", + "request": { + "$ref": "SnapshotSettings" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, "snapshots": { "methods": { "delete": { @@ -28276,7 +29597,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -28447,7 +29768,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -28485,6 +29806,11 @@ "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", "location": "query", "type": "boolean" + }, + "serviceProjectNumber": { + "format": "int64", + "location": "query", + "type": "string" } }, "path": "projects/{project}/aggregated/sslCertificates", @@ -28615,7 +29941,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -28674,7 +30000,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -28712,6 +30038,11 @@ "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" + }, + "serviceProjectNumber": { + "format": "int64", + "location": "query", + "type": "string" } }, "path": "projects/{project}/aggregated/sslPolicies", @@ -28840,7 +30171,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. 
If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -28895,7 +30226,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. 
If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -28995,7 +30326,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. 
However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -29033,6 +30364,11 @@ "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", "location": "query", "type": "boolean" + }, + "serviceProjectNumber": { + "format": "int64", + "location": "query", + "type": "string" } }, "path": "projects/{project}/aggregated/subnetworks", @@ -29287,7 +30623,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -29349,7 +30685,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -29713,7 +31049,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. 
The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -29814,7 +31150,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. 
For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -29852,6 +31188,11 @@ "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" + }, + "serviceProjectNumber": { + "format": "int64", + "location": "query", + "type": "string" } }, "path": "projects/{project}/aggregated/targetHttpProxies", @@ -29982,7 +31323,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. 
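The hunks that add the `serviceProjectNumber` query parameter (int64) to the aggregated-list methods would, once the Go client is regenerated from this discovery revision, surface as a setter on the corresponding list call. A hedged sketch follows; the `ServiceProjectNumber` method name is the conventional generated form and is an assumption here:

```go
package computeexample

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// listProxiesForServiceProject lists aggregated target HTTP proxies, scoped to a
// service project. ServiceProjectNumber is assumed to be the generated setter
// for the new serviceProjectNumber query parameter added in this revision.
func listProxiesForServiceProject(ctx context.Context, svc *compute.Service, hostProject string, serviceProjectNumber int64) error {
	return svc.TargetHttpProxies.AggregatedList(hostProject).
		ServiceProjectNumber(serviceProjectNumber).
		Pages(ctx, func(page *compute.TargetHttpProxyAggregatedList) error {
			for scope, scoped := range page.Items {
				for _, proxy := range scoped.TargetHttpProxies {
					fmt.Println(scope, proxy.Name)
				}
			}
			return nil
		})
}
```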
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -30125,7 +31466,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. 
Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -30163,6 +31504,11 @@ "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" + }, + "serviceProjectNumber": { + "format": "int64", + "location": "query", + "type": "string" } }, "path": "projects/{project}/aggregated/targetHttpsProxies", @@ -30293,7 +31639,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -30601,7 +31947,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. 
The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -30639,6 +31985,11 @@ "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" + }, + "serviceProjectNumber": { + "format": "int64", + "location": "query", + "type": "string" } }, "path": "projects/{project}/aggregated/targetInstances", @@ -30794,7 +32145,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. 
These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -30845,6 +32196,55 @@ "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] + }, + "setSecurityPolicy": { + "description": "Sets the Google Cloud Armor security policy for the specified target instance. For more information, see Google Cloud Armor Overview", + "flatPath": "projects/{project}/zones/{zone}/targetInstances/{targetInstance}/setSecurityPolicy", + "httpMethod": "POST", + "id": "compute.targetInstances.setSecurityPolicy", + "parameterOrder": [ + "project", + "zone", + "targetInstance" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetInstance": { + "description": "Name of the TargetInstance resource to which the security policy should be set. The name should conform to RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "zone": { + "description": "Name of the zone scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/zones/{zone}/targetInstances/{targetInstance}/setSecurityPolicy", + "request": { + "$ref": "SecurityPolicyReference" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] } } }, @@ -30960,7 +32360,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. 
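The new `compute.targetInstances.setSecurityPolicy` method above takes a `SecurityPolicyReference` and returns an `Operation`, with `requestId` providing idempotent retries. Below is a hedged sketch, assuming a Go client regenerated from this discovery revision; the `TargetInstances.SetSecurityPolicy` call and its `RequestId` setter follow the usual generated naming and are assumptions here:

```go
package computeexample

import (
	"context"

	"github.com/google/uuid"
	compute "google.golang.org/api/compute/v1"
)

// attachPolicyToTargetInstance attaches a Cloud Armor security policy to a
// target instance and returns the resulting long-running Operation.
func attachPolicyToTargetInstance(ctx context.Context, svc *compute.Service, project, zone, targetInstance, policyURL string) (*compute.Operation, error) {
	ref := &compute.SecurityPolicyReference{SecurityPolicy: policyURL}
	return svc.TargetInstances.SetSecurityPolicy(project, zone, targetInstance, ref).
		RequestId(uuid.NewString()). // retries with the same ID are ignored server-side
		Context(ctx).
		Do()
}
```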
These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -30998,6 +32398,11 @@ "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" + }, + "serviceProjectNumber": { + "format": "int64", + "location": "query", + "type": "string" } }, "path": "projects/{project}/aggregated/targetPools", @@ -31199,7 +32604,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. 
You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -31406,6 +32811,55 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] + }, + "setSecurityPolicy": { + "description": "Sets the Google Cloud Armor security policy for the specified target pool. For more information, see Google Cloud Armor Overview", + "flatPath": "projects/{project}/regions/{region}/targetPools/{targetPool}/setSecurityPolicy", + "httpMethod": "POST", + "id": "compute.targetPools.setSecurityPolicy", + "parameterOrder": [ + "project", + "region", + "targetPool" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "targetPool": { + "description": "Name of the TargetPool resource to which the security policy should be set. The name should conform to RFC1035.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/targetPools/{targetPool}/setSecurityPolicy", + "request": { + "$ref": "SecurityPolicyReference" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] } } }, @@ -31529,7 +32983,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
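The hunk above adds the parallel `compute.targetPools.setSecurityPolicy` method. With a regenerated client the usage would mirror the target-instance sketch earlier, roughly `svc.TargetPools.SetSecurityPolicy(project, region, targetPool, ref)`, with a region replacing the zone; the exact generated signature is an assumption until the client is regenerated.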
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. 
You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -31796,7 +33250,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -31834,6 +33288,11 @@ "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" + }, + "serviceProjectNumber": { + "format": "int64", + "location": "query", + "type": "string" } }, "path": "projects/{project}/aggregated/targetTcpProxies", @@ -31964,7 +33423,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -32107,7 +33566,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. 
The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -32145,6 +33604,11 @@ "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" + }, + "serviceProjectNumber": { + "format": "int64", + "location": "query", + "type": "string" } }, "path": "projects/{project}/aggregated/targetVpnGateways", @@ -32300,7 +33764,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. 
These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -32416,7 +33880,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -32454,6 +33918,11 @@ "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", "location": "query", "type": "boolean" + }, + "serviceProjectNumber": { + "format": "int64", + "location": "query", + "type": "string" } }, "path": "projects/{project}/aggregated/urlMaps", @@ -32626,7 +34095,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -32806,7 +34275,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -32844,6 +34313,11 @@ "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" + }, + "serviceProjectNumber": { + "format": "int64", + "location": "query", + "type": "string" } }, "path": "projects/{project}/aggregated/vpnGateways", @@ -33042,7 +34516,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. 
If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -33204,7 +34678,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. 
If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -33242,6 +34716,11 @@ "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", "location": "query", "type": "boolean" + }, + "serviceProjectNumber": { + "format": "int64", + "location": "query", + "type": "string" } }, "path": "projects/{project}/aggregated/vpnTunnels", @@ -33397,7 +34876,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. 
You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -33596,7 +35075,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -33740,7 +35219,7 @@ ], "parameters": { "filter": { - "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", "location": "query", "type": "string" }, @@ -33788,9 +35267,32 @@ } } }, - "revision": "20230711", + "revision": "20231024", "rootUrl": "https://compute.googleapis.com/", "schemas": { + "AWSV4Signature": { + "description": "Contains the configurations necessary to generate a signature for access to private storage buckets that support Signature Version 4 for authentication. The service name for generating the authentication header will always default to 's3'.", + "id": "AWSV4Signature", + "properties": { + "accessKey": { + "description": "The access key used for s3 bucket authentication. Required for updating or creating a backend that uses AWS v4 signature authentication, but will not be returned as part of the configuration when queried with a REST API GET request. @InputOnly", + "type": "string" + }, + "accessKeyId": { + "description": "The identifier of an access key used for s3 bucket authentication.", + "type": "string" + }, + "accessKeyVersion": { + "description": "The optional version identifier for the access key. 
You can use this to keep track of different iterations of your access key.", + "type": "string" + }, + "originRegion": { + "description": "The name of the cloud region of your origin. This is a free-form field with the name of the region your cloud uses to host your origin. For example, \"us-east-1\" for AWS or \"us-ashburn-1\" for OCI.", + "type": "string" + } + }, + "type": "object" + }, "AcceleratorConfig": { "description": "A specification of the type and number of accelerator cards attached to the instance.", "id": "AcceleratorConfig", @@ -34345,6 +35847,10 @@ "description": "The DNS domain name for the public PTR record. You can set this field only if the `setPublicPtr` field is enabled in accessConfig. If this field is unspecified in ipv6AccessConfig, a default PTR record will be createc for first IP in associated external IPv6 range.", "type": "string" }, + "securityPolicy": { + "description": "[Output Only] The resource URL for the security policy associated with this access config.", + "type": "string" + }, "setPublicPtr": { "description": "Specifies whether a public DNS 'PTR' record should be created to map the external IP address of the instance to a DNS domain name. This field is not used in ipv6AccessConfig. A default PTR record will be created if the VM has external IPv6 range associated.", "type": "boolean" @@ -35334,7 +36840,7 @@ "type": "string" }, "replicaZones": { - "description": "Required for each regional disk associated with the instance. Specify the URLs of the zones where the disk should be replicated to. You must provide exactly two replica zones, and one zone must be the same as the instance zone. You can't use this option with boot disks.", + "description": "Required for each regional disk associated with the instance. Specify the URLs of the zones where the disk should be replicated to. You must provide exactly two replica zones, and one zone must be the same as the instance zone.", "items": { "type": "string" }, @@ -36231,7 +37737,7 @@ "type": "string" }, "timeZone": { - "description": "The time zone to use when interpreting the schedule. The value of this field must be a time zone name from the tz database: http://en.wikipedia.org/wiki/Tz_database. This field is assigned a default value of “UTC” if left empty.", + "description": "The time zone to use when interpreting the schedule. The value of this field must be a time zone name from the tz database: https://en.wikipedia.org/wiki/Tz_database. This field is assigned a default value of \"UTC\" if left empty.", "type": "string" } }, @@ -36795,7 +38301,7 @@ "type": "array" }, "localityLbPolicy": { - "description": "The load balancing algorithm used within the scope of the locality. The possible values are: - ROUND_ROBIN: This is a simple policy in which each healthy backend is selected in round robin order. This is the default. - LEAST_REQUEST: An O(1) algorithm which selects two random healthy hosts and picks the host which has fewer active requests. - RING_HASH: The ring/modulo hash load balancer implements consistent hashing to backends. The algorithm has the property that the addition/removal of a host from a set of N hosts only affects 1/N of the requests. - RANDOM: The load balancer selects a random healthy host. - ORIGINAL_DESTINATION: Backend host is selected based on the client connection metadata, i.e., connections are opened to the same address as the destination address of the incoming connection before the connection was redirected to the load balancer. 
- MAGLEV: used as a drop in replacement for the ring hash load balancer. Maglev is not as stable as ring hash but has faster table lookup build times and host selection times. For more information about Maglev, see https://ai.google/research/pubs/pub44824 This field is applicable to either: - A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. - A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED. If sessionAffinity is not NONE, and this field is not set to MAGLEV or RING_HASH, session affinity settings will not take effect. Only ROUND_ROBIN and RING_HASH are supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.", + "description": "The load balancing algorithm used within the scope of the locality. The possible values are: - ROUND_ROBIN: This is a simple policy in which each healthy backend is selected in round robin order. This is the default. - LEAST_REQUEST: An O(1) algorithm which selects two random healthy hosts and picks the host which has fewer active requests. - RING_HASH: The ring/modulo hash load balancer implements consistent hashing to backends. The algorithm has the property that the addition/removal of a host from a set of N hosts only affects 1/N of the requests. - RANDOM: The load balancer selects a random healthy host. - ORIGINAL_DESTINATION: Backend host is selected based on the client connection metadata, i.e., connections are opened to the same address as the destination address of the incoming connection before the connection was redirected to the load balancer. - MAGLEV: used as a drop in replacement for the ring hash load balancer. Maglev is not as stable as ring hash but has faster table lookup build times and host selection times. For more information about Maglev, see https://ai.google/research/pubs/pub44824 This field is applicable to either: - A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to INTERNAL_MANAGED. - A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED, INTERNAL_MANAGED, or EXTERNAL_MANAGED. If sessionAffinity is not NONE, and this field is not set to MAGLEV or RING_HASH, session affinity settings will not take effect. Only ROUND_ROBIN and RING_HASH are supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.", "enum": [ "INVALID_LB_POLICY", "LEAST_REQUEST", @@ -36934,6 +38440,12 @@ "description": "The backend service timeout has a different meaning depending on the type of load balancer. For more information see, Backend service settings. The default is 30 seconds. The full range of timeout values allowed goes from 1 through 2,147,483,647 seconds. This value can be overridden in the PathMatcher configuration of the UrlMap that references this backend service. Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true. 
Instead, use maxStreamDuration.", "format": "int32", "type": "integer" + }, + "usedBy": { + "items": { + "$ref": "BackendServiceUsedBy" + }, + "type": "array" } }, "type": "object" @@ -37301,7 +38813,7 @@ "id": "BackendServiceIAP", "properties": { "enabled": { - "description": "Whether the serving infrastructure will authenticate and authorize all incoming requests. If true, the oauth2ClientId and oauth2ClientSecret fields must be non-empty.", + "description": "Whether the serving infrastructure will authenticate and authorize all incoming requests.", "type": "boolean" }, "oauth2ClientId": { @@ -37471,6 +38983,158 @@ }, "type": "object" }, + "BackendServiceListUsable": { + "description": "Contains a list of usable BackendService resources.", + "id": "BackendServiceListUsable", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "description": "A list of BackendService resources.", + "items": { + "$ref": "BackendService" + }, + "type": "array" + }, + "kind": { + "default": "compute#usableBackendServiceList", + "description": "[Output Only] Type of resource. Always compute#usableBackendServiceList for lists of usable backend services.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", + "LARGE_DEPLOYMENT_WARNING", + "LIST_OVERHEAD_QUOTA_EXCEED", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDeprecated": [ + false, + false, + false, + false, + false, + false, + true, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false + ], + "enumDescriptions": [ + "Warning about failed cleanup of transient changes made by a failed operation.", + "A link to a deprecated resource was created.", + "When deploying and at least one of the resources has a type marked as deprecated", + "The user created a boot disk that is larger than image size.", + "When deploying and at least one of the resources has a type marked as experimental", + "Warning that is present in an external api call", + "Warning that value of a field has been overridden. Deprecated unused field.", + "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", + "When deploying a deployment with a exceedingly large number of resources", + "Resource can't be retrieved due to list overhead quota exceed which captures the amount of resources filtered out by user-defined list filter.", + "A resource depends on a missing type", + "The route's nextHopIp address is not assigned to an instance on the network.", + "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", + "The route's nextHopInstance URL refers to an instance that does not exist.", + "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", + "The route's next hop instance does not have a status of RUNNING.", + "Error which is not critical. We decided to continue the process despite the mentioned error.", + "No results are present on a particular list page.", + "Success is reported, but some results may be missing due to errors", + "The user attempted to use a resource that requires a TOS they have not accepted.", + "Warning that a resource is in use.", + "One or more of the resources set to auto-delete could not be deleted because they were in use.", + "When a resource schema validation is ignored.", + "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", + "When undeclared properties in the schema are present", + "A given scope cannot be reached." 
+ ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, "BackendServiceLocalityLoadBalancingPolicyConfig": { "description": "Container for either a built-in LB policy supported by gRPC or Envoy or a custom one implemented by the end user.", "id": "BackendServiceLocalityLoadBalancingPolicyConfig", @@ -37576,6 +39240,15 @@ }, "type": "object" }, + "BackendServiceUsedBy": { + "id": "BackendServiceUsedBy", + "properties": { + "reference": { + "type": "string" + } + }, + "type": "object" + }, "BackendServicesScopedList": { "id": "BackendServicesScopedList", "properties": { @@ -38034,6 +39707,10 @@ "description": "Per-instance properties to be set on individual instances. To be extended in the future.", "id": "BulkInsertInstanceResourcePerInstanceProperties", "properties": { + "hostname": { + "description": "Specifies the hostname of the instance. More details in: https://cloud.google.com/compute/docs/instances/custom-hostname-vm#naming_convention", + "type": "string" + }, "name": { "description": "This field is only temporary. It will be removed. 
Do not use it.", "type": "string" @@ -38041,6 +39718,48 @@ }, "type": "object" }, + "BulkInsertOperationStatus": { + "id": "BulkInsertOperationStatus", + "properties": { + "createdVmCount": { + "description": "[Output Only] Count of VMs successfully created so far.", + "format": "int32", + "type": "integer" + }, + "deletedVmCount": { + "description": "[Output Only] Count of VMs that got deleted during rollback.", + "format": "int32", + "type": "integer" + }, + "failedToCreateVmCount": { + "description": "[Output Only] Count of VMs that started creating but encountered an error.", + "format": "int32", + "type": "integer" + }, + "status": { + "description": "[Output Only] Creation status of BulkInsert operation - information if the flow is rolling forward or rolling back.", + "enum": [ + "CREATING", + "DONE", + "ROLLING_BACK", + "STATUS_UNSPECIFIED" + ], + "enumDescriptions": [ + "Rolling forward - creating VMs.", + "Done", + "Rolling back - cleaning up after an error.", + "" + ], + "type": "string" + }, + "targetVmCount": { + "description": "[Output Only] Count of VMs originally planned to be created.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, "CacheInvalidationRule": { "id": "CacheInvalidationRule", "properties": { @@ -38212,7 +39931,7 @@ "type": "string" }, "reservations": { - "description": "List of reservations in this commitment.", + "description": "List of create-on-create reseravtions for this commitment.", "items": { "$ref": "Reservation" }, @@ -38263,9 +39982,12 @@ "description": "The type of commitment, which affects the discount rate and the eligible resources. Type MEMORY_OPTIMIZED specifies a commitment that will only apply to memory optimized machines. Type ACCELERATOR_OPTIMIZED specifies a commitment that will only apply to accelerator optimized machines.", "enum": [ "ACCELERATOR_OPTIMIZED", + "ACCELERATOR_OPTIMIZED_A3", "COMPUTE_OPTIMIZED", "COMPUTE_OPTIMIZED_C2D", "COMPUTE_OPTIMIZED_C3", + "COMPUTE_OPTIMIZED_C3D", + "COMPUTE_OPTIMIZED_H3", "GENERAL_PURPOSE", "GENERAL_PURPOSE_E2", "GENERAL_PURPOSE_N2", @@ -38289,6 +40011,9 @@ "", "", "", + "", + "", + "", "" ], "type": "string" @@ -39178,7 +40903,7 @@ "type": "string" }, "sizeGb": { - "description": "Size, in GB, of the persistent disk. You can specify this field when creating a persistent disk using the sourceImage, sourceSnapshot, or sourceDisk parameter, or specify it alone to create an empty persistent disk. If you specify this field along with a source, the value of sizeGb must not be less than the size of the source. Acceptable values are 1 to 65536, inclusive.", + "description": "Size, in GB, of the persistent disk. You can specify this field when creating a persistent disk using the sourceImage, sourceSnapshot, or sourceDisk parameter, or specify it alone to create an empty persistent disk. If you specify this field along with a source, the value of sizeGb must not be less than the size of the source. Acceptable values are greater than 0.", "format": "int64", "type": "string" }, @@ -41945,7 +43670,7 @@ "type": "string" }, "network": { - "description": "This field is not used for external load balancing. For Internal TCP/UDP Load Balancing, this field identifies the network that the load balanced IP should belong to for this Forwarding Rule. If the subnetwork is specified, the network of the subnetwork will be used. If neither subnetwork nor this field is specified, the default network will be used. 
For Private Service Connect forwarding rules that forward traffic to Google APIs, a network must be provided.", + "description": "This field is not used for global external load balancing. For Internal TCP/UDP Load Balancing, this field identifies the network that the load balanced IP should belong to for this Forwarding Rule. If the subnetwork is specified, the network of the subnetwork will be used. If neither subnetwork nor this field is specified, the default network will be used. For Private Service Connect forwarding rules that forward traffic to Google APIs, a network must be provided.", "type": "string" }, "networkTier": { @@ -41965,7 +43690,7 @@ "type": "string" }, "noAutomateDnsZone": { - "description": "This is used in PSC consumer ForwardingRule to control whether it should try to auto-generate a DNS zone or not. Non-PSC forwarding rules do not use this field.", + "description": "This is used in PSC consumer ForwardingRule to control whether it should try to auto-generate a DNS zone or not. Non-PSC forwarding rules do not use this field. Once set, this field is not mutable.", "type": "boolean" }, "portRange": { @@ -42039,7 +43764,7 @@ "type": "string" }, "target": { - "description": "The URL of the target resource to receive the matched traffic. For regional forwarding rules, this target must be in the same region as the forwarding rule. For global forwarding rules, this target must be a global load balancing resource. The forwarded traffic must be of a type appropriate to the target object. - For load balancers, see the \"Target\" column in [Port specifications](https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts#ip_address_specifications). - For Private Service Connect forwarding rules that forward traffic to Google APIs, provide the name of a supported Google API bundle: - vpc-sc - APIs that support VPC Service Controls. - all-apis - All supported Google APIs. - For Private Service Connect forwarding rules that forward traffic to managed services, the target must be a service attachment. ", + "description": "The URL of the target resource to receive the matched traffic. For regional forwarding rules, this target must be in the same region as the forwarding rule. For global forwarding rules, this target must be a global load balancing resource. The forwarded traffic must be of a type appropriate to the target object. - For load balancers, see the \"Target\" column in [Port specifications](https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts#ip_address_specifications). - For Private Service Connect forwarding rules that forward traffic to Google APIs, provide the name of a supported Google API bundle: - vpc-sc - APIs that support VPC Service Controls. - all-apis - All supported Google APIs. - For Private Service Connect forwarding rules that forward traffic to managed services, the target must be a service attachment. The target is not mutable once set as a service attachment. ", "type": "string" } }, @@ -42730,6 +44455,7 @@ "SECURE_BOOT", "SEV_CAPABLE", "SEV_LIVE_MIGRATABLE", + "SEV_LIVE_MIGRATABLE_V2", "SEV_SNP_CAPABLE", "UEFI_COMPATIBLE", "VIRTIO_SCSI_MULTIQUEUE", @@ -42745,6 +44471,7 @@ "", "", "", + "", "" ], "type": "string" @@ -42912,7 +44639,7 @@ "type": "object" }, "HealthCheck": { - "description": "Represents a Health Check resource. 
Google Compute Engine has two Health Check resources: * [Global](/compute/docs/reference/rest/v1/healthChecks) * [Regional](/compute/docs/reference/rest/v1/regionHealthChecks) Internal HTTP(S) load balancers must use regional health checks (`compute.v1.regionHealthChecks`). Traffic Director must use global health checks (`compute.v1.healthChecks`). Internal TCP/UDP load balancers can use either regional or global health checks (`compute.v1.regionHealthChecks` or `compute.v1.healthChecks`). External HTTP(S), TCP proxy, and SSL proxy load balancers as well as managed instance group auto-healing must use global health checks (`compute.v1.healthChecks`). Backend service-based network load balancers must use regional health checks (`compute.v1.regionHealthChecks`). Target pool-based network load balancers must use legacy HTTP health checks (`compute.v1.httpHealthChecks`). For more information, see Health checks overview.", + "description": "Represents a health check resource. Google Compute Engine has two health check resources: * [Regional](/compute/docs/reference/rest/v1/regionHealthChecks) * [Global](/compute/docs/reference/rest/v1/healthChecks) These health check resources can be used for load balancing and for autohealing VMs in a managed instance group (MIG). **Load balancing** The following load balancer can use either regional or global health check: * Internal TCP/UDP load balancer The following load balancers require regional health check: * Internal HTTP(S) load balancer * Backend service-based network load balancer Traffic Director and the following load balancers require global health check: * External HTTP(S) load balancer * TCP proxy load balancer * SSL proxy load balancer The following load balancer require [legacy HTTP health checks](/compute/docs/reference/rest/v1/httpHealthChecks): * Target pool-based network load balancer **Autohealing in MIGs** The health checks that you use for autohealing VMs in a MIG can be either regional or global. For more information, see Set up an application health check and autohealing. For more information, see Health checks overview.", "id": "HealthCheck", "properties": { "checkIntervalSec": { @@ -44329,7 +46056,7 @@ }, "faultInjectionPolicy": { "$ref": "HttpFaultInjection", - "description": "The specification for fault injection introduced into traffic to test the resiliency of clients to backend service failure. As part of fault injection, when clients send requests to a backend service, delays can be introduced by a load balancer on a percentage of requests before sending those requests to the backend service. Similarly requests from clients can be aborted by the load balancer for a percentage of requests. timeout and retry_policy is ignored by clients that are configured with a fault_injection_policy if: 1. The traffic is generated by fault injection AND 2. The fault injection is not a delay fault injection. Fault injection is not supported with the global external HTTP(S) load balancer (classic). To see which load balancers support fault injection, see Load balancing: Routing and traffic management features." + "description": "The specification for fault injection introduced into traffic to test the resiliency of clients to backend service failure. As part of fault injection, when clients send requests to a backend service, delays can be introduced by a load balancer on a percentage of requests before sending those requests to the backend service. 
Similarly requests from clients can be aborted by the load balancer for a percentage of requests. timeout and retry_policy is ignored by clients that are configured with a fault_injection_policy if: 1. The traffic is generated by fault injection AND 2. The fault injection is not a delay fault injection. Fault injection is not supported with the classic Application Load Balancer . To see which load balancers support fault injection, see Load balancing: Routing and traffic management features." }, "maxStreamDuration": { "$ref": "Duration", @@ -44349,7 +46076,7 @@ }, "urlRewrite": { "$ref": "UrlRewrite", - "description": "The spec to modify the URL of the request, before forwarding the request to the matched service. urlRewrite is the only action supported in UrlMaps for external HTTP(S) load balancers. Not supported when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true." + "description": "The spec to modify the URL of the request, before forwarding the request to the matched service. urlRewrite is the only action supported in UrlMaps for classic Application Load Balancers. Not supported when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true." }, "weightedBackendServices": { "description": "A list of weighted backend services to send traffic to when a route match occurs. The weights determine the fraction of traffic that flows to their corresponding backend service. If all traffic needs to go to a single backend service, there must be one weightedBackendService with weight set to a non-zero number. After a backend service is identified and before forwarding the request to the backend service, advanced routing actions such as URL rewrites and header transformations are applied depending on additional settings specified in this HttpRouteAction.", @@ -44387,7 +46114,7 @@ }, "routeAction": { "$ref": "HttpRouteAction", - "description": "In response to a matching matchRule, the load balancer performs advanced routing actions, such as URL rewrites and header transformations, before forwarding the request to the selected backend. If routeAction specifies any weightedBackendServices, service must not be set. Conversely if service is set, routeAction cannot contain any weightedBackendServices. Only one of urlRedirect, service or routeAction.weightedBackendService must be set. URL maps for Classic external HTTP(S) load balancers only support the urlRewrite action within a route rule's routeAction." + "description": "In response to a matching matchRule, the load balancer performs advanced routing actions, such as URL rewrites and header transformations, before forwarding the request to the selected backend. If routeAction specifies any weightedBackendServices, service must not be set. Conversely if service is set, routeAction cannot contain any weightedBackendServices. Only one of urlRedirect, service or routeAction.weightedBackendService must be set. URL maps for classic Application Load Balancers only support the urlRewrite action within a route rule's routeAction." }, "service": { "description": "The full or partial URL of the backend service resource to which traffic is directed if this rule is matched. If routeAction is also specified, advanced routing actions, such as URL rewrites, take effect before sending the request to the backend. However, if service is specified, routeAction cannot contain any weightedBackendServices. 
Conversely, if routeAction specifies any weightedBackendServices, service must not be specified. Only one of urlRedirect, service or routeAction.weightedBackendService must be set.", @@ -45923,11 +47650,6 @@ "type": "array" }, "baseInstanceName": { - "annotations": { - "required": [ - "compute.instanceGroupManagers.insert" - ] - }, "description": "The base instance name to use for instances in this group. The value must be 1-58 characters long. Instances are named by appending a hyphen and a random four-character string to the base instance name. The base instance name must comply with RFC1035.", "pattern": "[a-z][-a-z0-9]{0,57}", "type": "string" @@ -48017,7 +49739,7 @@ "type": "object" }, "InstanceTemplate": { - "description": "Represents an Instance Template resource. You can use instance templates to create VM instances and managed instance groups. For more information, read Instance Templates.", + "description": "Represents an Instance Template resource. Google Compute Engine has two Instance Template resources: * [Global](/compute/docs/reference/rest/v1/instanceTemplates) * [Regional](/compute/docs/reference/rest/v1/regionInstanceTemplates) You can reuse a global instance template in different regions whereas you can use a regional instance template in a specified region only. If you want to reduce cross-region dependency or achieve data residency, use a regional instance template. To create VMs, managed instance groups, and reservations, you can use either global or regional instance templates. For more information, read Instance Templates.", "id": "InstanceTemplate", "properties": { "creationTimestamp": { @@ -48568,6 +50290,19 @@ }, "type": "object" }, + "InstancesBulkInsertOperationMetadata": { + "id": "InstancesBulkInsertOperationMetadata", + "properties": { + "perLocationStatus": { + "additionalProperties": { + "$ref": "BulkInsertOperationStatus" + }, + "description": "Status information per location (location name is key). Example key: zones/us-central1-a", + "type": "object" + } + }, + "type": "object" + }, "InstancesGetEffectiveFirewallsResponse": { "id": "InstancesGetEffectiveFirewallsResponse", "properties": { @@ -48841,6 +50576,23 @@ }, "type": "object" }, + "InstancesSetSecurityPolicyRequest": { + "id": "InstancesSetSecurityPolicyRequest", + "properties": { + "networkInterfaces": { + "description": "The network interfaces that the security policy will be applied to. Network interfaces use the nicN naming format. You can only set a security policy for network interfaces with an access config.", + "items": { + "type": "string" + }, + "type": "array" + }, + "securityPolicy": { + "description": "A full or partial URL to a security policy to add to this instance. If this field is set to an empty string it will remove the associated security policy.", + "type": "string" + } + }, + "type": "object" + }, "InstancesSetServiceAccountRequest": { "id": "InstancesSetServiceAccountRequest", "properties": { @@ -48896,6 +50648,19 @@ "description": "Administrative status of the interconnect. When this is set to true, the Interconnect is functional and can carry traffic. When set to false, no packets can be carried over the interconnect and no BGP routes are exchanged over it. 
By default, the status is set to true.", "type": "boolean" }, + "availableFeatures": { + "description": "[Output only] List of features available for this Interconnect connection, which can take one of the following values: - MACSEC If present then the Interconnect connection is provisioned on MACsec capable hardware ports. If not present then the Interconnect connection is provisioned on non-MACsec capable ports and MACsec isn't supported and enabling MACsec fails.", + "items": { + "enum": [ + "IF_MACSEC" + ], + "enumDescriptions": [ + "Media Access Control security (MACsec)" + ], + "type": "string" + }, + "type": "array" + }, "circuitInfos": { "description": "[Output Only] A list of CircuitInfo objects, that describe the individual circuits in this LAG.", "items": { @@ -48989,6 +50754,14 @@ "description": "URL of the InterconnectLocation object that represents where this connection is to be provisioned.", "type": "string" }, + "macsec": { + "$ref": "InterconnectMacsec", + "description": "Configuration that enables Media Access Control security (MACsec) on the Cloud Interconnect connection between Google and your on-premises router." + }, + "macsecEnabled": { + "description": "Enable or disable MACsec on this Interconnect connection. MACsec enablement fails if the MACsec object is not specified.", + "type": "boolean" + }, "name": { "annotations": { "required": [ @@ -49028,6 +50801,19 @@ "description": "Indicates that this is a Cross-Cloud Interconnect. This field specifies the location outside of Google's network that the interconnect is connected to.", "type": "string" }, + "requestedFeatures": { + "description": "Optional. List of features requested for this Interconnect connection, which can take one of the following values: - MACSEC If specified then the connection is created on MACsec capable hardware ports. If not specified, the default value is false, which allocates non-MACsec capable ports first if available. This parameter can be provided only with Interconnect INSERT. It isn't valid for Interconnect PATCH.", + "items": { + "enum": [ + "IF_MACSEC" + ], + "enumDescriptions": [ + "Media Access Control security (MACsec)" + ], + "type": "string" + }, + "type": "array" + }, "requestedLinkCount": { "description": "Target number of physical links in the link bundle, as requested by the customer.", "format": "int32", @@ -49238,7 +51024,7 @@ "type": "string" }, "pairingKey": { - "description": "[Output only for type PARTNER. Input only for PARTNER_PROVIDER. Not present for DEDICATED]. The opaque identifier of an PARTNER attachment used to initiate provisioning with a selected partner. Of the form \"XXXXX/region/domain\"", + "description": "[Output only for type PARTNER. Input only for PARTNER_PROVIDER. Not present for DEDICATED]. The opaque identifier of a PARTNER attachment used to initiate provisioning with a selected partner. Of the form \"XXXXX/region/domain\"", "type": "string" }, "partnerAsn": { @@ -49248,7 +51034,7 @@ }, "partnerMetadata": { "$ref": "InterconnectAttachmentPartnerMetadata", - "description": "Informational metadata about Partner attachments from Partners to display to customers. Output only for for PARTNER type, mutable for PARTNER_PROVIDER, not available for DEDICATED." + "description": "Informational metadata about Partner attachments from Partners to display to customers. Output only for PARTNER type, mutable for PARTNER_PROVIDER, not available for DEDICATED." 
}, "privateInterconnectInfo": { "$ref": "InterconnectAttachmentPrivateInfo", @@ -50011,6 +51797,10 @@ "lacpStatus": { "$ref": "InterconnectDiagnosticsLinkLACPStatus" }, + "macsec": { + "$ref": "InterconnectDiagnosticsMacsecStatus", + "description": "Describes the status of MACsec encryption on this link." + }, "operationalStatus": { "description": "The operational status of the link.", "enum": [ @@ -50034,6 +51824,21 @@ }, "type": "object" }, + "InterconnectDiagnosticsMacsecStatus": { + "description": "Describes the status of MACsec encryption on the link.", + "id": "InterconnectDiagnosticsMacsecStatus", + "properties": { + "ckn": { + "description": "Indicates the Connectivity Association Key Name (CKN) currently being used if MACsec is operational.", + "type": "string" + }, + "operational": { + "description": "Indicates whether or not MACsec is operational on this link.", + "type": "boolean" + } + }, + "type": "object" + }, "InterconnectList": { "description": "Response to the list request, and contains a list of interconnects.", "id": "InterconnectList", @@ -50198,6 +52003,34 @@ "description": "[Output Only] Availability zone for this InterconnectLocation. Within a metropolitan area (metro), maintenance will not be simultaneously scheduled in more than one availability zone. Example: \"zone1\" or \"zone2\".", "type": "string" }, + "availableFeatures": { + "description": "[Output only] List of features available at this InterconnectLocation, which can take one of the following values: - MACSEC ", + "items": { + "enum": [ + "IF_MACSEC" + ], + "enumDescriptions": [ + "Media Access Control security (MACsec)" + ], + "type": "string" + }, + "type": "array" + }, + "availableLinkTypes": { + "description": "[Output only] List of link types available at this InterconnectLocation, which can take one of the following values: - LINK_TYPE_ETHERNET_10G_LR - LINK_TYPE_ETHERNET_100G_LR ", + "items": { + "enum": [ + "LINK_TYPE_ETHERNET_100G_LR", + "LINK_TYPE_ETHERNET_10G_LR" + ], + "enumDescriptions": [ + "100G Ethernet, LR Optics.", + "10G Ethernet, LR Optics. [(rate_bps) = 10000000000];" + ], + "type": "string" + }, + "type": "array" + }, "city": { "description": "[Output Only] Metropolitan area designator that indicates which city an interconnect is located. For example: \"Chicago, IL\", \"Amsterdam, Netherlands\".", "type": "string" @@ -50478,6 +52311,76 @@ }, "type": "object" }, + "InterconnectMacsec": { + "description": "Configuration information for enabling Media Access Control security (MACsec) on this Cloud Interconnect connection between Google and your on-premises router.", + "id": "InterconnectMacsec", + "properties": { + "failOpen": { + "description": "If set to true, the Interconnect connection is configured with a should-secure MACsec security policy, that allows the Google router to fallback to cleartext traffic if the MKA session cannot be established. By default, the Interconnect connection is configured with a must-secure security policy that drops all traffic if the MKA session cannot be established with your router.", + "type": "boolean" + }, + "preSharedKeys": { + "description": "Required. A keychain placeholder describing a set of named key objects along with their start times. A MACsec CKN/CAK is generated for each key in the key chain. 
Google router automatically picks the key with the most recent startTime when establishing or re-establishing a MACsec secure link.", + "items": { + "$ref": "InterconnectMacsecPreSharedKey" + }, + "type": "array" + } + }, + "type": "object" + }, + "InterconnectMacsecConfig": { + "description": "MACsec configuration information for the Interconnect connection. Contains the generated Connectivity Association Key Name (CKN) and the key (CAK) for this Interconnect connection.", + "id": "InterconnectMacsecConfig", + "properties": { + "preSharedKeys": { + "description": "A keychain placeholder describing a set of named key objects along with their start times. A MACsec CKN/CAK is generated for each key in the key chain. Google router automatically picks the key with the most recent startTime when establishing or re-establishing a MACsec secure link.", + "items": { + "$ref": "InterconnectMacsecConfigPreSharedKey" + }, + "type": "array" + } + }, + "type": "object" + }, + "InterconnectMacsecConfigPreSharedKey": { + "description": "Describes a pre-shared key used to setup MACsec in static connectivity association key (CAK) mode.", + "id": "InterconnectMacsecConfigPreSharedKey", + "properties": { + "cak": { + "description": "An auto-generated Connectivity Association Key (CAK) for this key.", + "type": "string" + }, + "ckn": { + "description": "An auto-generated Connectivity Association Key Name (CKN) for this key.", + "type": "string" + }, + "name": { + "description": "User provided name for this pre-shared key.", + "type": "string" + }, + "startTime": { + "description": "User provided timestamp on or after which this key is valid.", + "type": "string" + } + }, + "type": "object" + }, + "InterconnectMacsecPreSharedKey": { + "description": "Describes a pre-shared key used to setup MACsec in static connectivity association key (CAK) mode.", + "id": "InterconnectMacsecPreSharedKey", + "properties": { + "name": { + "description": "Required. A name for this pre-shared key. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "type": "string" + }, + "startTime": { + "description": "A RFC3339 timestamp on or after which the key is valid. startTime can be in the future. If the keychain has a single key, startTime can be omitted. If the keychain has multiple keys, startTime is mandatory for each key. The start times of keys must be in increasing order. The start times of two consecutive keys must be at least 6 hours apart.", + "type": "string" + } + }, + "type": "object" + }, "InterconnectOutageNotification": { "description": "Description of a planned outage on this Interconnect.", "id": "InterconnectOutageNotification", @@ -50900,6 +52803,20 @@ }, "type": "object" }, + "InterconnectsGetMacsecConfigResponse": { + "description": "Response for the InterconnectsGetMacsecConfigRequest.", + "id": "InterconnectsGetMacsecConfigResponse", + "properties": { + "etag": { + "description": "end_interface: MixerGetResponseWithEtagBuilder", + "type": "string" + }, + "result": { + "$ref": "InterconnectMacsecConfig" + } + }, + "type": "object" + }, "License": { "description": "Represents a License resource. A License represents billing and aggregate usage data for public and marketplace images. 
*Caution* This resource is intended for use only by third-party partners who are creating Cloud Marketplace images. ", "id": "License", @@ -52311,6 +54228,10 @@ "$ref": "ManagedInstanceLastAttempt", "description": "[Output Only] Information about the last attempt to create or delete the instance." }, + "name": { + "description": "[Output Only] The name of the instance. The name always exists even if the instance has not yet been created.", + "type": "string" + }, "preservedStateFromConfig": { "$ref": "PreservedState", "description": "[Output Only] Preserved state applied from per-instance config for this instance." @@ -52529,6 +54450,72 @@ }, "type": "object" }, + "NatIpInfo": { + "description": "Contains NAT IP information of a NAT config (i.e. usage status, mode).", + "id": "NatIpInfo", + "properties": { + "natIpInfoMappings": { + "description": "A list of all NAT IPs assigned to this NAT config.", + "items": { + "$ref": "NatIpInfoNatIpInfoMapping" + }, + "type": "array" + }, + "natName": { + "description": "Name of the NAT config which the NAT IP belongs to.", + "type": "string" + } + }, + "type": "object" + }, + "NatIpInfoNatIpInfoMapping": { + "description": "Contains information of a NAT IP.", + "id": "NatIpInfoNatIpInfoMapping", + "properties": { + "mode": { + "description": "Specifies whether NAT IP is auto or manual.", + "enum": [ + "AUTO", + "MANUAL" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, + "natIp": { + "description": "NAT IP address. For example: 203.0.113.11.", + "type": "string" + }, + "usage": { + "description": "Specifies whether NAT IP is currently serving at least one endpoint or not.", + "enum": [ + "IN_USE", + "UNUSED" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, + "NatIpInfoResponse": { + "id": "NatIpInfoResponse", + "properties": { + "result": { + "description": "[Output Only] A list of NAT IP information.", + "items": { + "$ref": "NatIpInfo" + }, + "type": "array" + } + }, + "type": "object" + }, "Network": { "description": "Represents a VPC Network resource. Networks connect resources to each other and to the internet. For more information, read Virtual Private Cloud (VPC) Network.", "id": "Network", @@ -52891,6 +54878,10 @@ "description": "The IPv4 address assigned to the producer instance network interface. This value will be a range in case of Serverless.", "type": "string" }, + "ipv6Address": { + "description": "The IPv6 address assigned to the producer instance network interface. This is only assigned when the stack types of both the instance network interface and the consumer subnet are IPv4_IPv6.", + "type": "string" + }, "projectIdOrNum": { "description": "The project id or number of the interface to which the IP was assigned.", "type": "string" @@ -52925,6 +54916,10 @@ "subnetwork": { "description": "The subnetwork used to assign the IP to the producer instance network interface.", "type": "string" + }, + "subnetworkCidrRange": { + "description": "[Output Only] The CIDR range of the subnet from which the IPv4 internal IP was allocated from.", + "type": "string" } }, "type": "object" @@ -53592,7 +55587,7 @@ "type": "object" }, "NetworkEndpointGroup": { - "description": "Represents a collection of network endpoints. A network endpoint group (NEG) defines how a set of endpoints should be reached, whether they are reachable, and where they are located. 
For more information about using NEGs, see Setting up external HTTP(S) Load Balancing with internet NEGs, Setting up zonal NEGs, or Setting up external HTTP(S) Load Balancing with serverless NEGs.", + "description": "Represents a collection of network endpoints. A network endpoint group (NEG) defines how a set of endpoints should be reached, whether they are reachable, and where they are located. For more information about using NEGs for different use cases, see Network endpoint groups overview.", "id": "NetworkEndpointGroup", "properties": { "annotations": { @@ -57084,7 +59079,7 @@ "type": "object" }, "Operation": { - "description": "Represents an Operation resource. Google Compute Engine has three Operation resources: * [Global](/compute/docs/reference/rest/v1/globalOperations) * [Regional](/compute/docs/reference/rest/v1/regionOperations) * [Zonal](/compute/docs/reference/rest/v1/zoneOperations) You can use an operation resource to manage asynchronous API requests. For more information, read Handling API responses. Operations can be global, regional or zonal. - For global operations, use the `globalOperations` resource. - For regional operations, use the `regionOperations` resource. - For zonal operations, use the `zonalOperations` resource. For more information, read Global, Regional, and Zonal Resources.", + "description": "Represents an Operation resource. Google Compute Engine has three Operation resources: * [Global](/compute/docs/reference/rest/v1/globalOperations) * [Regional](/compute/docs/reference/rest/v1/regionOperations) * [Zonal](/compute/docs/reference/rest/v1/zoneOperations) You can use an operation resource to manage asynchronous API requests. For more information, read Handling API responses. Operations can be global, regional or zonal. - For global operations, use the `globalOperations` resource. - For regional operations, use the `regionOperations` resource. - For zonal operations, use the `zoneOperations` resource. For more information, read Global, Regional, and Zonal Resources.", "id": "Operation", "properties": { "clientOperationId": { @@ -57169,6 +59164,9 @@ "description": "[Output Only] The time that this operation was requested. This value is in RFC3339 text format.", "type": "string" }, + "instancesBulkInsertOperationMetadata": { + "$ref": "InstancesBulkInsertOperationMetadata" + }, "kind": { "default": "compute#operation", "description": "[Output Only] Type of the resource. Always `compute#operation` for Operation resources.", @@ -57199,6 +59197,10 @@ "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, + "setCommonInstanceMetadataOperationMetadata": { + "$ref": "SetCommonInstanceMetadataOperationMetadata", + "description": "[Output Only] If the operation is for projects.setCommonInstanceMetadata, this field will contain information on all underlying zonal actions and their state." + }, "startTime": { "description": "[Output Only] The time that this operation was started by the server. 
This value is in RFC3339 text format.", "type": "string" @@ -57231,7 +59233,7 @@ "type": "string" }, "user": { - "description": "[Output Only] User who requested the operation, for example: `user@example.com`.", + "description": "[Output Only] User who requested the operation, for example: `user@example.com` or `alice_smith_identifier (global/workforcePools/example-com-us-employees)`.", "type": "string" }, "warnings": { @@ -58579,7 +60581,7 @@ "properties": { "defaultRouteAction": { "$ref": "HttpRouteAction", - "description": "defaultRouteAction takes effect when none of the pathRules or routeRules match. The load balancer performs advanced routing actions, such as URL rewrites and header transformations, before forwarding the request to the selected backend. If defaultRouteAction specifies any weightedBackendServices, defaultService must not be set. Conversely if defaultService is set, defaultRouteAction cannot contain any weightedBackendServices. Only one of defaultRouteAction or defaultUrlRedirect must be set. URL maps for Classic external HTTP(S) load balancers only support the urlRewrite action within a path matcher's defaultRouteAction." + "description": "defaultRouteAction takes effect when none of the pathRules or routeRules match. The load balancer performs advanced routing actions, such as URL rewrites and header transformations, before forwarding the request to the selected backend. If defaultRouteAction specifies any weightedBackendServices, defaultService must not be set. Conversely if defaultService is set, defaultRouteAction cannot contain any weightedBackendServices. Only one of defaultRouteAction or defaultUrlRedirect must be set. URL maps for classic Application Load Balancers only support the urlRewrite action within a path matcher's defaultRouteAction." }, "defaultService": { "description": "The full or partial URL to the BackendService resource. This URL is used if none of the pathRules or routeRules defined by this PathMatcher are matched. For example, the following are all valid URLs to a BackendService resource: - https://www.googleapis.com/compute/v1/projects/project /global/backendServices/backendService - compute/v1/projects/project/global/backendServices/backendService - global/backendServices/backendService If defaultRouteAction is also specified, advanced routing actions, such as URL rewrites, take effect before sending the request to the backend. However, if defaultService is specified, defaultRouteAction cannot contain any weightedBackendServices. Conversely, if defaultRouteAction specifies any weightedBackendServices, defaultService must not be specified. Only one of defaultService, defaultUrlRedirect , or defaultRouteAction.weightedBackendService must be set. Authorization requires one or more of the following Google IAM permissions on the specified resource default_service: - compute.backendBuckets.use - compute.backendServices.use ", @@ -58631,7 +60633,7 @@ }, "routeAction": { "$ref": "HttpRouteAction", - "description": "In response to a matching path, the load balancer performs advanced routing actions, such as URL rewrites and header transformations, before forwarding the request to the selected backend. If routeAction specifies any weightedBackendServices, service must not be set. Conversely if service is set, routeAction cannot contain any weightedBackendServices. Only one of routeAction or urlRedirect must be set. URL maps for Classic external HTTP(S) load balancers only support the urlRewrite action within a path rule's routeAction." 
+ "description": "In response to a matching path, the load balancer performs advanced routing actions, such as URL rewrites and header transformations, before forwarding the request to the selected backend. If routeAction specifies any weightedBackendServices, service must not be set. Conversely if service is set, routeAction cannot contain any weightedBackendServices. Only one of routeAction or urlRedirect must be set. URL maps for classic Application Load Balancers only support the urlRewrite action within a path rule's routeAction." }, "service": { "description": "The full or partial URL of the backend service resource to which traffic is directed if this rule is matched. If routeAction is also specified, advanced routing actions, such as URL rewrites, take effect before sending the request to the backend. However, if service is specified, routeAction cannot contain any weightedBackendServices. Conversely, if routeAction specifies any weightedBackendServices, service must not be specified. Only one of urlRedirect, service or routeAction.weightedBackendService must be set.", @@ -58684,7 +60686,7 @@ "type": "object" }, "Policy": { - "description": "An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources. A `Policy` is a collection of `bindings`. A `binding` binds one or more `members`, or principals, to a single `role`. Principals can be user accounts, service accounts, Google groups, and domains (such as G Suite). A `role` is a named list of permissions; each `role` can be an IAM predefined role or a user-created custom role. For some types of Google Cloud resources, a `binding` can also specify a `condition`, which is a logical expression that allows access to a resource only if the expression evaluates to `true`. A condition can add constraints based on attributes of the request, the resource, or both. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). **JSON example:** { \"bindings\": [ { \"role\": \"roles/resourcemanager.organizationAdmin\", \"members\": [ \"user:mike@example.com\", \"group:admins@example.com\", \"domain:google.com\", \"serviceAccount:my-project-id@appspot.gserviceaccount.com\" ] }, { \"role\": \"roles/resourcemanager.organizationViewer\", \"members\": [ \"user:eve@example.com\" ], \"condition\": { \"title\": \"expirable access\", \"description\": \"Does not grant access after Sep 2020\", \"expression\": \"request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\", } } ], \"etag\": \"BwWWja0YfJA=\", \"version\": 3 } **YAML example:** bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time \u003c timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3 For a description of IAM and its features, see the [IAM documentation](https://cloud.google.com/iam/docs/).", + "description": "An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources. A `Policy` is a collection of `bindings`. A `binding` binds one or more `members`, or principals, to a single `role`. 
Principals can be user accounts, service accounts, Google groups, and domains (such as G Suite). A `role` is a named list of permissions; each `role` can be an IAM predefined role or a user-created custom role. For some types of Google Cloud resources, a `binding` can also specify a `condition`, which is a logical expression that allows access to a resource only if the expression evaluates to `true`. A condition can add constraints based on attributes of the request, the resource, or both. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). **JSON example:** ``` { \"bindings\": [ { \"role\": \"roles/resourcemanager.organizationAdmin\", \"members\": [ \"user:mike@example.com\", \"group:admins@example.com\", \"domain:google.com\", \"serviceAccount:my-project-id@appspot.gserviceaccount.com\" ] }, { \"role\": \"roles/resourcemanager.organizationViewer\", \"members\": [ \"user:eve@example.com\" ], \"condition\": { \"title\": \"expirable access\", \"description\": \"Does not grant access after Sep 2020\", \"expression\": \"request.time \u003c timestamp('2020-10-01T00:00:00.000Z')\", } } ], \"etag\": \"BwWWja0YfJA=\", \"version\": 3 } ``` **YAML example:** ``` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time \u003c timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3 ``` For a description of IAM and its features, see the [IAM documentation](https://cloud.google.com/iam/docs/).", "id": "Policy", "properties": { "auditConfigs": { @@ -58745,6 +60747,20 @@ "description": "Preserved disks defined for this instance. This map is keyed with the device names of the disks.", "type": "object" }, + "externalIPs": { + "additionalProperties": { + "$ref": "PreservedStatePreservedNetworkIp" + }, + "description": "Preserved external IPs defined for this instance. This map is keyed with the name of the network interface.", + "type": "object" + }, + "internalIPs": { + "additionalProperties": { + "$ref": "PreservedStatePreservedNetworkIp" + }, + "description": "Preserved internal IPs defined for this instance. This map is keyed with the name of the network interface.", + "type": "object" + }, "metadata": { "additionalProperties": { "type": "string" @@ -58789,6 +60805,42 @@ }, "type": "object" }, + "PreservedStatePreservedNetworkIp": { + "id": "PreservedStatePreservedNetworkIp", + "properties": { + "autoDelete": { + "description": "These stateful IPs will never be released during autohealing, update or VM instance recreate operations. This flag is used to configure if the IP reservation should be deleted after it is no longer used by the group, e.g. 
when the given instance or the whole group is deleted.", + "enum": [ + "NEVER", + "ON_PERMANENT_INSTANCE_DELETION" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, + "ipAddress": { + "$ref": "PreservedStatePreservedNetworkIpIpAddress", + "description": "Ip address representation" + } + }, + "type": "object" + }, + "PreservedStatePreservedNetworkIpIpAddress": { + "id": "PreservedStatePreservedNetworkIpIpAddress", + "properties": { + "address": { + "description": "The URL of the reservation for this IP address.", + "type": "string" + }, + "literal": { + "description": "An IPv4 internal network address to assign to the instance for this network interface.", + "type": "string" + } + }, + "type": "object" + }, "Project": { "description": "Represents a Project resource. A project is used to organize resources in a Google Cloud Platform environment. For more information, read about the Resource Hierarchy.", "id": "Project", @@ -58970,6 +61022,18 @@ "description": "A public advertised prefix represents an aggregated IP prefix or netblock which customers bring to cloud. The IP prefix is a single unit of route advertisement and is announced globally to the internet.", "id": "PublicAdvertisedPrefix", "properties": { + "byoipApiVersion": { + "description": "[Output Only] The version of BYOIP API.", + "enum": [ + "V1", + "V2" + ], + "enumDescriptions": [ + "This public advertised prefix can be used to create both regional and global public delegated prefixes. It usually takes 4 weeks to create or delete a public delegated prefix. The BGP status cannot be changed.", + "This public advertised prefix can only be used to create regional public delegated prefixes. Public delegated prefix creation and deletion takes minutes and the BGP status can be modified." + ], + "type": "string" + }, "creationTimestamp": { "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" @@ -59011,6 +61075,20 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, + "pdpScope": { + "description": "Specifies how child public delegated prefix will be scoped. It could be one of following values: - `REGIONAL`: The public delegated prefix is regional only. The provisioning will take a few minutes. - `GLOBAL`: The public delegated prefix is global only. The provisioning will take ~4 weeks. - `GLOBAL_AND_REGIONAL` [output only]: The public delegated prefixes is BYOIP V1 legacy prefix. This is output only value and no longer supported in BYOIP V2. ", + "enum": [ + "GLOBAL", + "GLOBAL_AND_REGIONAL", + "REGIONAL" + ], + "enumDescriptions": [ + "The public delegated prefix is global only. The provisioning will take ~4 weeks.", + "The public delegated prefixes is BYOIP V1 legacy prefix. This is output only value and no longer supported in BYOIP V2.", + "The public delegated prefix is regional only. The provisioning will take a few minutes." + ], + "type": "string" + }, "publicDelegatedPrefixs": { "description": "[Output Only] The list of public delegated prefixes that exist for this public advertised prefix.", "items": { @@ -59029,20 +61107,24 @@ "status": { "description": "The status of the public advertised prefix. Possible values include: - `INITIAL`: RPKI validation is complete. - `PTR_CONFIGURED`: User has configured the PTR. - `VALIDATED`: Reverse DNS lookup is successful. - `REVERSE_DNS_LOOKUP_FAILED`: Reverse DNS lookup failed. - `PREFIX_CONFIGURATION_IN_PROGRESS`: The prefix is being configured. - `PREFIX_CONFIGURATION_COMPLETE`: The prefix is fully configured. 
- `PREFIX_REMOVAL_IN_PROGRESS`: The prefix is being removed. ", "enum": [ + "ANNOUNCED_TO_INTERNET", "INITIAL", "PREFIX_CONFIGURATION_COMPLETE", "PREFIX_CONFIGURATION_IN_PROGRESS", "PREFIX_REMOVAL_IN_PROGRESS", "PTR_CONFIGURED", + "READY_TO_ANNOUNCE", "REVERSE_DNS_LOOKUP_FAILED", "VALIDATED" ], "enumDescriptions": [ + "The prefix is announced to Internet.", "RPKI validation is complete.", "The prefix is fully configured.", "The prefix is being configured.", "The prefix is being removed.", "User has configured the PTR.", + "The prefix is currently withdrawn but ready to be announced.", "Reverse DNS lookup failed.", "Reverse DNS lookup is successful." ], @@ -59233,6 +61315,18 @@ "description": "A PublicDelegatedPrefix resource represents an IP block within a PublicAdvertisedPrefix that is configured within a single cloud scope (global or region). IPs in the block can be allocated to resources within that scope. Public delegated prefixes may be further broken up into smaller IP blocks in the same scope as the parent block.", "id": "PublicDelegatedPrefix", "properties": { + "byoipApiVersion": { + "description": "[Output Only] The version of BYOIP API.", + "enum": [ + "V1", + "V2" + ], + "enumDescriptions": [ + "This public delegated prefix usually takes 4 weeks to delete, and the BGP status cannot be changed. Announce and Withdraw APIs can not be used on this prefix.", + "This public delegated prefix takes minutes to delete. Announce and Withdraw APIs can be used on this prefix to change the BGP status." + ], + "type": "string" + }, "creationTimestamp": { "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" @@ -59297,12 +61391,16 @@ "description": "[Output Only] The status of the public delegated prefix, which can be one of following values: - `INITIALIZING` The public delegated prefix is being initialized and addresses cannot be created yet. - `READY_TO_ANNOUNCE` The public delegated prefix is a live migration prefix and is active. - `ANNOUNCED` The public delegated prefix is active. - `DELETING` The public delegated prefix is being deprovsioned. ", "enum": [ "ANNOUNCED", + "ANNOUNCED_TO_GOOGLE", + "ANNOUNCED_TO_INTERNET", "DELETING", "INITIALIZING", "READY_TO_ANNOUNCE" ], "enumDescriptions": [ "The public delegated prefix is active.", + "The prefix is announced within Google network.", + "The prefix is announced to Internet and within Google.", "The public delegated prefix is being deprovsioned.", "The public delegated prefix is being initialized and addresses cannot be created yet.", "The public delegated prefix is currently withdrawn but ready to be announced." 
@@ -61622,6 +63720,32 @@ }, "type": "object" }, + "RegionNetworkEndpointGroupsAttachEndpointsRequest": { + "id": "RegionNetworkEndpointGroupsAttachEndpointsRequest", + "properties": { + "networkEndpoints": { + "description": "The list of network endpoints to be attached.", + "items": { + "$ref": "NetworkEndpoint" + }, + "type": "array" + } + }, + "type": "object" + }, + "RegionNetworkEndpointGroupsDetachEndpointsRequest": { + "id": "RegionNetworkEndpointGroupsDetachEndpointsRequest", + "properties": { + "networkEndpoints": { + "description": "The list of network endpoints to be detached.", + "items": { + "$ref": "NetworkEndpoint" + }, + "type": "array" + } + }, + "type": "object" + }, "RegionNetworkFirewallPoliciesGetEffectiveFirewallsResponse": { "id": "RegionNetworkFirewallPoliciesGetEffectiveFirewallsResponse", "properties": { @@ -63225,7 +65349,7 @@ "compute.routes.insert" ] }, - "description": "The destination range of outgoing packets that this route applies to. Both IPv4 and IPv6 are supported.", + "description": "The destination range of outgoing packets that this route applies to. Both IPv4 and IPv6 are supported. Must specify an IPv4 range (e.g. 192.0.2.0/24) or an IPv6 range in RFC 4291 format (e.g. 2001:db8::/32). IPv6 range will be displayed using RFC 5952 compressed format.", "type": "string" }, "id": { @@ -63274,7 +65398,7 @@ "type": "string" }, "nextHopIp": { - "description": "The network IP address of an instance that should handle matching packets. Only IPv4 is supported.", + "description": "The network IP address of an instance that should handle matching packets. Both IPv6 address and IPv4 addresses are supported. Must specify an IPv4 address in dot-decimal notation (e.g. 192.0.2.99) or an IPv6 address in RFC 4291 format (e.g. 2001:db8::2d9:51:0:0 or 2001:db8:0:0:2d9:51:0:0). IPv6 addresses will be displayed using RFC 5952 compressed format (e.g. 2001:db8::2d9:51:0:0). Should never be an IPv4-mapped IPv6 address.", "type": "string" }, "nextHopNetwork": { @@ -63689,7 +65813,7 @@ "type": "string" }, "interfaces": { - "description": "Router interfaces. Each interface requires either one linked resource, (for example, linkedVpnTunnel), or IP address and IP address range (for example, ipRange), or both.", + "description": "Router interfaces. To create a BGP peer that uses a router interface, the interface must have one of the following fields specified: - linkedVpnTunnel - linkedInterconnectAttachment - subnetwork You can create a router interface without any of these fields specified. However, you cannot create a BGP peer that uses that interface.", "items": { "$ref": "RouterInterface" }, @@ -64156,11 +66280,11 @@ "type": "string" }, "linkedInterconnectAttachment": { - "description": "URI of the linked Interconnect attachment. It must be in the same region as the router. Each interface can have one linked resource, which can be a VPN tunnel, an Interconnect attachment, or a virtual machine instance.", + "description": "URI of the linked Interconnect attachment. It must be in the same region as the router. Each interface can have one linked resource, which can be a VPN tunnel, an Interconnect attachment, or a subnetwork.", "type": "string" }, "linkedVpnTunnel": { - "description": "URI of the linked VPN tunnel, which must be in the same region as the router. 
Each interface can have one linked resource, which can be a VPN tunnel, an Interconnect attachment, or a virtual machine instance.", + "description": "URI of the linked VPN tunnel, which must be in the same region as the router. Each interface can have one linked resource, which can be a VPN tunnel, an Interconnect attachment, or a subnetwork.", "type": "string" }, "managementType": { @@ -64372,7 +66496,7 @@ "compute.routers.update" ] }, - "description": "Name used to identify the key. Must be unique within a router. Must be referenced by at least one bgpPeer. Must comply with RFC1035.", + "description": "Name used to identify the key. Must be unique within a router. Must be referenced by exactly one bgpPeer. Must comply with RFC1035.", "type": "string" } }, @@ -64383,7 +66507,7 @@ "id": "RouterNat", "properties": { "autoNetworkTier": { - "description": "The network tier to use when automatically reserving IP addresses. Must be one of: PREMIUM, STANDARD. If not specified, PREMIUM tier will be used.", + "description": "The network tier to use when automatically reserving NAT IP addresses. Must be one of: PREMIUM, STANDARD. If not specified, then the current project-level default tier is used.", "enum": [ "FIXED_STANDARD", "PREMIUM", @@ -64416,10 +66540,12 @@ "description": "List of NAT-ted endpoint types supported by the Nat Gateway. If the list is empty, then it will be equivalent to include ENDPOINT_TYPE_VM", "items": { "enum": [ + "ENDPOINT_TYPE_MANAGED_PROXY_LB", "ENDPOINT_TYPE_SWG", "ENDPOINT_TYPE_VM" ], "enumDescriptions": [ + "This is used for regional Application Load Balancers (internal and external) and regional proxy Network Load Balancers (internal and external) endpoints.", "This is used for Secure Web Gateway endpoints.", "This is the default." ], @@ -64558,7 +66684,7 @@ "type": "string" }, "match": { - "description": "CEL expression that specifies the match condition that egress traffic from a VM is evaluated against. If it evaluates to true, the corresponding `action` is enforced. The following examples are valid match expressions for public NAT: \"inIpRange(destination.ip, '1.1.0.0/16') || inIpRange(destination.ip, '2.2.0.0/16')\" \"destination.ip == '1.1.0.1' || destination.ip == '8.8.8.8'\" The following example is a valid match expression for private NAT: \"nexthop.hub == 'https://networkconnectivity.googleapis.com/v1alpha1/projects/my-project/global/hub/hub-1'\"", + "description": "CEL expression that specifies the match condition that egress traffic from a VM is evaluated against. If it evaluates to true, the corresponding `action` is enforced. The following examples are valid match expressions for public NAT: \"inIpRange(destination.ip, '1.1.0.0/16') || inIpRange(destination.ip, '2.2.0.0/16')\" \"destination.ip == '1.1.0.1' || destination.ip == '8.8.8.8'\" The following example is a valid match expression for private NAT: \"nexthop.hub == '//networkconnectivity.googleapis.com/projects/my-project/locations/global/hubs/hub-1'\"", "type": "string" }, "ruleNumber": { @@ -65836,6 +67962,13 @@ "" ], "type": "string" + }, + "userDefinedFields": { + "description": "Definitions of user-defined fields for CLOUD_ARMOR_NETWORK policies. A user-defined field consists of up to 4 bytes extracted from a fixed offset in the packet, relative to the IPv4, IPv6, TCP, or UDP header, with an optional mask to select certain bits. Rules may then specify matching values for these fields. 
Example: userDefinedFields: - name: \"ipv4_fragment_offset\" base: IPV4 offset: 6 size: 2 mask: \"0x1fff\"", + "items": { + "$ref": "SecurityPolicyUserDefinedField" + }, + "type": "array" } }, "type": "object" @@ -65870,6 +68003,40 @@ "" ], "type": "string" + }, + "thresholdConfigs": { + "description": "Configuration options for layer7 adaptive protection for various customizable thresholds.", + "items": { + "$ref": "SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfig" + }, + "type": "array" + } + }, + "type": "object" + }, + "SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfig": { + "id": "SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfig", + "properties": { + "autoDeployConfidenceThreshold": { + "format": "float", + "type": "number" + }, + "autoDeployExpirationSec": { + "format": "int32", + "type": "integer" + }, + "autoDeployImpactedBaselineThreshold": { + "format": "float", + "type": "number" + }, + "autoDeployLoadThreshold": { + "format": "float", + "type": "number" + }, + "name": { + "description": "The name must be 1-63 characters long, and comply with RFC1035. The name must be unique within the security policy.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" } }, "type": "object" @@ -66127,6 +68294,10 @@ "$ref": "SecurityPolicyRuleMatcher", "description": "A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding 'action' is enforced." }, + "networkMatch": { + "$ref": "SecurityPolicyRuleNetworkMatcher", + "description": "A match condition that incoming packets are evaluated against for CLOUD_ARMOR_NETWORK security policies. If it matches, the corresponding 'action' is enforced. The match criteria for a rule consists of built-in match fields (like 'srcIpRanges') and potentially multiple user-defined match fields ('userDefinedFields'). Field values may be extracted directly from the packet or derived from it (e.g. 'srcRegionCodes'). Some fields may not be present in every packet (e.g. 'srcPorts'). A user-defined field is only present if the base header is found in the packet and the entire field is in bounds. Each match field may specify which values can match it, listing one or more ranges, prefixes, or exact values that are considered a match for the field. A field value must be present in order to match a specified match field. If no match values are specified for a match field, then any field value is considered to match it, and it's not required to be present. For strings specifying '*' is also equivalent to match all. For a packet to match a rule, all specified match fields must match the corresponding field values derived from the packet. Example: networkMatch: srcIpRanges: - \"192.0.2.0/24\" - \"198.51.100.0/24\" userDefinedFields: - name: \"ipv4_fragment_offset\" values: - \"1-0x1fff\" The above match condition matches packets with a source IP in 192.0.2.0/24 or 198.51.100.0/24 and a user-defined field named \"ipv4_fragment_offset\" with a value between 1 and 0x1fff inclusive." + }, "preconfiguredWafConfig": { "$ref": "SecurityPolicyRulePreconfiguredWafConfig", "description": "Preconfigured WAF configuration to be applied for the rule. If the rule does not evaluate preconfigured WAF rules, i.e., if evaluatePreconfiguredWaf() is not used, this field will have no effect." 
@@ -66216,6 +68387,87 @@ }, "type": "object" }, + "SecurityPolicyRuleNetworkMatcher": { + "description": "Represents a match condition that incoming network traffic is evaluated against.", + "id": "SecurityPolicyRuleNetworkMatcher", + "properties": { + "destIpRanges": { + "description": "Destination IPv4/IPv6 addresses or CIDR prefixes, in standard text format.", + "items": { + "type": "string" + }, + "type": "array" + }, + "destPorts": { + "description": "Destination port numbers for TCP/UDP/SCTP. Each element can be a 16-bit unsigned decimal number (e.g. \"80\") or range (e.g. \"0-1023\").", + "items": { + "type": "string" + }, + "type": "array" + }, + "ipProtocols": { + "description": "IPv4 protocol / IPv6 next header (after extension headers). Each element can be an 8-bit unsigned decimal number (e.g. \"6\"), range (e.g. \"253-254\"), or one of the following protocol names: \"tcp\", \"udp\", \"icmp\", \"esp\", \"ah\", \"ipip\", or \"sctp\".", + "items": { + "type": "string" + }, + "type": "array" + }, + "srcAsns": { + "description": "BGP Autonomous System Number associated with the source IP address.", + "items": { + "format": "uint32", + "type": "integer" + }, + "type": "array" + }, + "srcIpRanges": { + "description": "Source IPv4/IPv6 addresses or CIDR prefixes, in standard text format.", + "items": { + "type": "string" + }, + "type": "array" + }, + "srcPorts": { + "description": "Source port numbers for TCP/UDP/SCTP. Each element can be a 16-bit unsigned decimal number (e.g. \"80\") or range (e.g. \"0-1023\").", + "items": { + "type": "string" + }, + "type": "array" + }, + "srcRegionCodes": { + "description": "Two-letter ISO 3166-1 alpha-2 country code associated with the source IP address.", + "items": { + "type": "string" + }, + "type": "array" + }, + "userDefinedFields": { + "description": "User-defined fields. Each element names a defined field and lists the matching values for that field.", + "items": { + "$ref": "SecurityPolicyRuleNetworkMatcherUserDefinedFieldMatch" + }, + "type": "array" + } + }, + "type": "object" + }, + "SecurityPolicyRuleNetworkMatcherUserDefinedFieldMatch": { + "id": "SecurityPolicyRuleNetworkMatcherUserDefinedFieldMatch", + "properties": { + "name": { + "description": "Name of the user-defined field, as given in the definition.", + "type": "string" + }, + "values": { + "description": "Matching values of the field. Each element can be a 32-bit unsigned decimal or hexadecimal (starting with \"0x\") number (e.g. \"64\") or range (e.g. \"0x400-0x7ff\").", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, "SecurityPolicyRulePreconfiguredWafConfig": { "id": "SecurityPolicyRulePreconfiguredWafConfig", "properties": { @@ -66440,10 +68692,54 @@ }, "type": "object" }, + "SecurityPolicyUserDefinedField": { + "id": "SecurityPolicyUserDefinedField", + "properties": { + "base": { + "description": "The base relative to which 'offset' is measured. Possible values are: - IPV4: Points to the beginning of the IPv4 header. - IPV6: Points to the beginning of the IPv6 header. - TCP: Points to the beginning of the TCP header, skipping over any IPv4 options or IPv6 extension headers. Not present for non-first fragments. - UDP: Points to the beginning of the UDP header, skipping over any IPv4 options or IPv6 extension headers. Not present for non-first fragments. 
required", + "enum": [ + "IPV4", + "IPV6", + "TCP", + "UDP" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ], + "type": "string" + }, + "mask": { + "description": "If specified, apply this mask (bitwise AND) to the field to ignore bits before matching. Encoded as a hexadecimal number (starting with \"0x\"). The last byte of the field (in network byte order) corresponds to the least significant byte of the mask.", + "type": "string" + }, + "name": { + "description": "The name of this field. Must be unique within the policy.", + "type": "string" + }, + "offset": { + "description": "Offset of the first byte of the field (in network byte order) relative to 'base'.", + "format": "int32", + "type": "integer" + }, + "size": { + "description": "Size of the field in bytes. Valid values: 1-4.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, "SecuritySettings": { "description": "The authentication and authorization settings for a BackendService.", "id": "SecuritySettings", "properties": { + "awsV4Authentication": { + "$ref": "AWSV4Signature", + "description": "The configuration needed to generate a signature for access to private storage buckets that support AWS's Signature Version 4 for authentication. Allowed only for INTERNET_IP_PORT and INTERNET_FQDN_PORT NEG backends." + }, "clientTlsPolicy": { "description": "Optional. A URL referring to a networksecurity.ClientTlsPolicy resource that describes how clients should authenticate with this service's backends. clientTlsPolicy only applies to a global BackendService with the loadBalancingScheme set to INTERNAL_SELF_MANAGED. If left blank, communications are not encrypted.", "type": "string" @@ -66625,7 +68921,7 @@ "description": "[Output Only] An 128-bit global unique ID of the PSC service attachment." }, "reconcileConnections": { - "description": "This flag determines whether a consumer accept/reject list change can reconcile the statuses of existing ACCEPTED or REJECTED PSC endpoints. - If false, connection policy update will only affect existing PENDING PSC endpoints. Existing ACCEPTED/REJECTED endpoints will remain untouched regardless how the connection policy is modified . - If true, update will affect both PENDING and ACCEPTED/REJECTED PSC endpoints. For example, an ACCEPTED PSC endpoint will be moved to REJECTED if its project is added to the reject list. For newly created service attachment, this boolean defaults to true.", + "description": "This flag determines whether a consumer accept/reject list change can reconcile the statuses of existing ACCEPTED or REJECTED PSC endpoints. - If false, connection policy update will only affect existing PENDING PSC endpoints. Existing ACCEPTED/REJECTED endpoints will remain untouched regardless how the connection policy is modified . - If true, update will affect both PENDING and ACCEPTED/REJECTED PSC endpoints. For example, an ACCEPTED PSC endpoint will be moved to REJECTED if its project is added to the reject list. 
For newly created service attachment, this boolean defaults to false.", "type": "boolean" }, "region": { @@ -67147,6 +69443,53 @@ }, "type": "object" }, + "SetCommonInstanceMetadataOperationMetadata": { + "id": "SetCommonInstanceMetadataOperationMetadata", + "properties": { + "clientOperationId": { + "description": "[Output Only] The client operation id.", + "type": "string" + }, + "perLocationOperations": { + "additionalProperties": { + "$ref": "SetCommonInstanceMetadataOperationMetadataPerLocationOperationInfo" + }, + "description": "[Output Only] Status information per location (location name is key). Example key: zones/us-central1-a", + "type": "object" + } + }, + "type": "object" + }, + "SetCommonInstanceMetadataOperationMetadataPerLocationOperationInfo": { + "id": "SetCommonInstanceMetadataOperationMetadataPerLocationOperationInfo", + "properties": { + "error": { + "$ref": "Status", + "description": "[Output Only] If state is `ABANDONED` or `FAILED`, this field is populated." + }, + "state": { + "description": "[Output Only] Status of the action, which can be one of the following: `PROPAGATING`, `PROPAGATED`, `ABANDONED`, `FAILED`, or `DONE`.", + "enum": [ + "ABANDONED", + "DONE", + "FAILED", + "PROPAGATED", + "PROPAGATING", + "UNSPECIFIED" + ], + "enumDescriptions": [ + "Operation not tracked in this location e.g. zone is marked as DOWN.", + "Operation has completed successfully.", + "Operation is in an error state.", + "Operation is confirmed to be in the location.", + "Operation is not yet confirmed to have been created in the location.", + "" + ], + "type": "string" + } + }, + "type": "object" + }, "ShareSettings": { "description": "The share setting for reservations and sole tenancy node groups.", "id": "ShareSettings", @@ -67402,6 +69745,10 @@ "$ref": "CustomerEncryptionKey", "description": "The customer-supplied encryption key of the source disk. Required if the source disk is protected by a customer-supplied encryption key." }, + "sourceDiskForRecoveryCheckpoint": { + "description": "The source disk whose recovery checkpoint will be used to create this snapshot.", + "type": "string" + }, "sourceDiskId": { "description": "[Output Only] The ID value of the disk used to create this snapshot. This value may be used to determine whether the snapshot was taken from the current or a previous instance of a given disk name.", "type": "string" @@ -67611,6 +69958,56 @@ }, "type": "object" }, + "SnapshotSettings": { + "id": "SnapshotSettings", + "properties": { + "storageLocation": { + "$ref": "SnapshotSettingsStorageLocationSettings", + "description": "Policy of which storage location is going to be resolved, and additional data that particularizes how the policy is going to be carried out." + } + }, + "type": "object" + }, + "SnapshotSettingsStorageLocationSettings": { + "id": "SnapshotSettingsStorageLocationSettings", + "properties": { + "locations": { + "additionalProperties": { + "$ref": "SnapshotSettingsStorageLocationSettingsStorageLocationPreference" + }, + "description": "When the policy is SPECIFIC_LOCATIONS, snapshots will be stored in the locations listed in this field. Keys are GCS bucket locations.", + "type": "object" + }, + "policy": { + "description": "The chosen location policy.", + "enum": [ + "LOCAL_REGION", + "NEAREST_MULTI_REGION", + "SPECIFIC_LOCATIONS", + "STORAGE_LOCATION_POLICY_UNSPECIFIED" + ], + "enumDescriptions": [ + "Store snapshot in the same region as with the originating disk. 
No additional parameters are needed.", + "Store snapshot to the nearest multi region GCS bucket, relative to the originating disk. No additional parameters are needed.", + "Store snapshot in the specific locations, as specified by the user. The list of regions to store must be defined under the `locations` field.", + "" + ], + "type": "string" + } + }, + "type": "object" + }, + "SnapshotSettingsStorageLocationSettingsStorageLocationPreference": { + "description": "A structure for specifying storage locations.", + "id": "SnapshotSettingsStorageLocationSettingsStorageLocationPreference", + "properties": { + "name": { + "description": "Name of the location. It should be one of the GCS buckets.", + "type": "string" + } + }, + "type": "object" + }, "SourceDiskEncryptionKey": { "id": "SourceDiskEncryptionKey", "properties": { @@ -67728,7 +70125,7 @@ "type": "object" }, "SslCertificate": { - "description": "Represents an SSL Certificate resource. Google Compute Engine has two SSL Certificate resources: * [Global](/compute/docs/reference/rest/v1/sslCertificates) * [Regional](/compute/docs/reference/rest/v1/regionSslCertificates) The sslCertificates are used by: - external HTTPS load balancers - SSL proxy load balancers The regionSslCertificates are used by internal HTTPS load balancers. Optionally, certificate file contents that you upload can contain a set of up to five PEM-encoded certificates. The API call creates an object (sslCertificate) that holds this data. You can use SSL keys and certificates to secure connections to a load balancer. For more information, read Creating and using SSL certificates, SSL certificates quotas and limits, and Troubleshooting SSL certificates.", + "description": "Represents an SSL certificate resource. Google Compute Engine has two SSL certificate resources: * [Global](/compute/docs/reference/rest/v1/sslCertificates) * [Regional](/compute/docs/reference/rest/v1/regionSslCertificates) The global SSL certificates (sslCertificates) are used by: - Global external Application Load Balancers - Classic Application Load Balancers - Proxy Network Load Balancers (with target SSL proxies) The regional SSL certificates (regionSslCertificates) are used by: - Regional external Application Load Balancers - Regional internal Application Load Balancers Optionally, certificate file contents that you upload can contain a set of up to five PEM-encoded certificates. The API call creates an object (sslCertificate) that holds this data. You can use SSL keys and certificates to secure connections to a load balancer. For more information, read Creating and using SSL certificates, SSL certificates quotas and limits, and Troubleshooting SSL certificates.", "id": "SslCertificate", "properties": { "certificate": { @@ -69024,6 +71421,20 @@ }, "description": "Disks created on the instances that will be preserved on instance delete, update, etc. This map is keyed with the device names of the disks.", "type": "object" + }, + "externalIPs": { + "additionalProperties": { + "$ref": "StatefulPolicyPreservedStateNetworkIp" + }, + "description": "External network IPs assigned to the instances that will be preserved on instance delete, update, etc. This map is keyed with the network interface name.", + "type": "object" + }, + "internalIPs": { + "additionalProperties": { + "$ref": "StatefulPolicyPreservedStateNetworkIp" + }, + "description": "Internal network IPs assigned to the instances that will be preserved on instance delete, update, etc. 
This map is keyed with the network interface name.", + "type": "object" } }, "type": "object" @@ -69046,6 +71457,51 @@ }, "type": "object" }, + "StatefulPolicyPreservedStateNetworkIp": { + "id": "StatefulPolicyPreservedStateNetworkIp", + "properties": { + "autoDelete": { + "description": "These stateful IPs will never be released during autohealing, update or VM instance recreate operations. This flag is used to configure if the IP reservation should be deleted after it is no longer used by the group, e.g. when the given instance or the whole group is deleted.", + "enum": [ + "NEVER", + "ON_PERMANENT_INSTANCE_DELETION" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, + "Status": { + "description": "The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).", + "id": "Status", + "properties": { + "code": { + "description": "The status code, which should be an enum value of google.rpc.Code.", + "format": "int32", + "type": "integer" + }, + "details": { + "description": "A list of messages that carry the error details. There is a common set of message types for APIs to use.", + "items": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.", + "type": "string" + } + }, + "type": "object" + }, "Subnetwork": { "description": "Represents a Subnetwork resource. A subnetwork (also known as a subnet) is a logical partition of a Virtual Private Cloud network with one primary IP range and zero or more secondary IP ranges. For more information, read Virtual Private Cloud (VPC) Network.", "id": "Subnetwork", @@ -69143,6 +71599,7 @@ "purpose": { "description": "The purpose of the resource. This field can be either PRIVATE, REGIONAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT, or INTERNAL_HTTPS_LOAD_BALANCER. PRIVATE is the default purpose for user-created subnets or subnets that are automatically created in auto mode networks. A subnet with purpose set to REGIONAL_MANAGED_PROXY is a user-created subnetwork that is reserved for regional Envoy-based load balancers. A subnet with purpose set to PRIVATE_SERVICE_CONNECT is used to publish services using Private Service Connect. A subnet with purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a proxy-only subnet that can be used only by regional internal HTTP(S) load balancers. Note that REGIONAL_MANAGED_PROXY is the preferred setting for all regional Envoy load balancers. If unspecified, the subnet purpose defaults to PRIVATE. 
The enableFlowLogs field isn't supported if the subnet purpose field is set to REGIONAL_MANAGED_PROXY.", "enum": [ + "GLOBAL_MANAGED_PROXY", "INTERNAL_HTTPS_LOAD_BALANCER", "PRIVATE", "PRIVATE_RFC_1918", @@ -69150,11 +71607,12 @@ "REGIONAL_MANAGED_PROXY" ], "enumDescriptions": [ + "Subnet reserved for Global Envoy-based Load Balancing.", "Subnet reserved for Internal HTTP(S) Load Balancing.", "Regular user created or automatically created subnet.", "Regular user created or automatically created subnet.", "Subnetworks created for Private Service Connect in the producer network.", - "Subnetwork used for Regional Internal/External HTTP(S) Load Balancing." + "Subnetwork used for Regional Envoy-based Load Balancing." ], "type": "string" }, @@ -70181,7 +72639,7 @@ "type": "object" }, "TargetHttpProxy": { - "description": "Represents a Target HTTP Proxy resource. Google Compute Engine has two Target HTTP Proxy resources: * [Global](/compute/docs/reference/rest/v1/targetHttpProxies) * [Regional](/compute/docs/reference/rest/v1/regionTargetHttpProxies) A target HTTP proxy is a component of GCP HTTP load balancers. * targetHttpProxies are used by external HTTP load balancers and Traffic Director. * regionTargetHttpProxies are used by internal HTTP load balancers. Forwarding rules reference a target HTTP proxy, and the target proxy then references a URL map. For more information, read Using Target Proxies and Forwarding rule concepts.", + "description": "Represents a Target HTTP Proxy resource. Google Compute Engine has two Target HTTP Proxy resources: * [Global](/compute/docs/reference/rest/v1/targetHttpProxies) * [Regional](/compute/docs/reference/rest/v1/regionTargetHttpProxies) A target HTTP proxy is a component of Google Cloud HTTP load balancers. * targetHttpProxies are used by global external Application Load Balancers, classic Application Load Balancers, cross-region internal Application Load Balancers, and Traffic Director. * regionTargetHttpProxies are used by regional internal Application Load Balancers and regional external Application Load Balancers. Forwarding rules reference a target HTTP proxy, and the target proxy then references a URL map. For more information, read Using Target Proxies and Forwarding rule concepts.", "id": "TargetHttpProxy", "properties": { "creationTimestamp": { @@ -70198,7 +72656,7 @@ "type": "string" }, "httpKeepAliveTimeoutSec": { - "description": "Specifies how long to keep a connection open, after completing a response, while there is no matching traffic (in seconds). If an HTTP keep-alive is not specified, a default value (610 seconds) will be used. For Global external HTTP(S) load balancer, the minimum allowed value is 5 seconds and the maximum allowed value is 1200 seconds. For Global external HTTP(S) load balancer (classic), this option is not available publicly.", + "description": "Specifies how long to keep a connection open, after completing a response, while there is no matching traffic (in seconds). If an HTTP keep-alive is not specified, a default value (610 seconds) will be used. For global external Application Load Balancers, the minimum allowed value is 5 seconds and the maximum allowed value is 1200 seconds. For classic Application Load Balancers, this option is not supported.", "format": "int32", "type": "integer" }, @@ -70604,7 +73062,7 @@ "type": "object" }, "TargetHttpsProxy": { - "description": "Represents a Target HTTPS Proxy resource. 
Google Compute Engine has two Target HTTPS Proxy resources: * [Global](/compute/docs/reference/rest/v1/targetHttpsProxies) * [Regional](/compute/docs/reference/rest/v1/regionTargetHttpsProxies) A target HTTPS proxy is a component of GCP HTTPS load balancers. * targetHttpsProxies are used by external HTTPS load balancers. * regionTargetHttpsProxies are used by internal HTTPS load balancers. Forwarding rules reference a target HTTPS proxy, and the target proxy then references a URL map. For more information, read Using Target Proxies and Forwarding rule concepts.", + "description": "Represents a Target HTTPS Proxy resource. Google Compute Engine has two Target HTTPS Proxy resources: * [Global](/compute/docs/reference/rest/v1/targetHttpsProxies) * [Regional](/compute/docs/reference/rest/v1/regionTargetHttpsProxies) A target HTTPS proxy is a component of GCP HTTPS load balancers. * targetHttpProxies are used by global external Application Load Balancers, classic Application Load Balancers, cross-region internal Application Load Balancers, and Traffic Director. * regionTargetHttpProxies are used by regional internal Application Load Balancers and regional external Application Load Balancers. Forwarding rules reference a target HTTPS proxy, and the target proxy then references a URL map. For more information, read Using Target Proxies and Forwarding rule concepts.", "id": "TargetHttpsProxy", "properties": { "authorizationPolicy": { @@ -70629,7 +73087,7 @@ "type": "string" }, "httpKeepAliveTimeoutSec": { - "description": "Specifies how long to keep a connection open, after completing a response, while there is no matching traffic (in seconds). If an HTTP keep-alive is not specified, a default value (610 seconds) will be used. For Global external HTTP(S) load balancer, the minimum allowed value is 5 seconds and the maximum allowed value is 1200 seconds. For Global external HTTP(S) load balancer (classic), this option is not available publicly.", + "description": "Specifies how long to keep a connection open, after completing a response, while there is no matching traffic (in seconds). If an HTTP keep-alive is not specified, a default value (610 seconds) will be used. For global external Application Load Balancers, the minimum allowed value is 5 seconds and the maximum allowed value is 1200 seconds. For classic Application Load Balancers, this option is not supported.", "format": "int32", "type": "integer" }, @@ -71052,6 +73510,10 @@ "description": "The URL of the network this target instance uses to forward traffic. If not specified, the traffic will be forwarded to the network that the default network interface belongs to.", "type": "string" }, + "securityPolicy": { + "description": "[Output Only] The resource URL for the security policy associated with this target instance.", + "type": "string" + }, "selfLink": { "description": "[Output Only] Server-defined URL for the resource.", "type": "string" @@ -71562,6 +74024,10 @@ "description": "[Output Only] URL of the region where the target pool resides.", "type": "string" }, + "securityPolicy": { + "description": "[Output Only] The resource URL for the security policy associated with this target pool.", + "type": "string" + }, "selfLink": { "description": "[Output Only] Server-defined URL for the resource.", "type": "string" @@ -73542,7 +76008,7 @@ "type": "object" }, "UrlMap": { - "description": "Represents a URL Map resource. 
Compute Engine has two URL Map resources: * [Global](/compute/docs/reference/rest/v1/urlMaps) * [Regional](/compute/docs/reference/rest/v1/regionUrlMaps) A URL map resource is a component of certain types of cloud load balancers and Traffic Director: * urlMaps are used by external HTTP(S) load balancers and Traffic Director. * regionUrlMaps are used by internal HTTP(S) load balancers. For a list of supported URL map features by the load balancer type, see the Load balancing features: Routing and traffic management table. For a list of supported URL map features for Traffic Director, see the Traffic Director features: Routing and traffic management table. This resource defines mappings from hostnames and URL paths to either a backend service or a backend bucket. To use the global urlMaps resource, the backend service must have a loadBalancingScheme of either EXTERNAL or INTERNAL_SELF_MANAGED. To use the regionUrlMaps resource, the backend service must have a loadBalancingScheme of INTERNAL_MANAGED. For more information, read URL Map Concepts.", + "description": "Represents a URL Map resource. Compute Engine has two URL Map resources: * [Global](/compute/docs/reference/rest/v1/urlMaps) * [Regional](/compute/docs/reference/rest/v1/regionUrlMaps) A URL map resource is a component of certain types of cloud load balancers and Traffic Director: * urlMaps are used by global external Application Load Balancers, classic Application Load Balancers, and cross-region internal Application Load Balancers. * regionUrlMaps are used by internal Application Load Balancers, regional external Application Load Balancers and regional internal Application Load Balancers. For a list of supported URL map features by the load balancer type, see the Load balancing features: Routing and traffic management table. For a list of supported URL map features for Traffic Director, see the Traffic Director features: Routing and traffic management table. This resource defines mappings from hostnames and URL paths to either a backend service or a backend bucket. To use the global urlMaps resource, the backend service must have a loadBalancingScheme of either EXTERNAL or INTERNAL_SELF_MANAGED. To use the regionUrlMaps resource, the backend service must have a loadBalancingScheme of INTERNAL_MANAGED. For more information, read URL Map Concepts.", "id": "UrlMap", "properties": { "creationTimestamp": { @@ -73551,7 +76017,7 @@ }, "defaultRouteAction": { "$ref": "HttpRouteAction", - "description": "defaultRouteAction takes effect when none of the hostRules match. The load balancer performs advanced routing actions, such as URL rewrites and header transformations, before forwarding the request to the selected backend. If defaultRouteAction specifies any weightedBackendServices, defaultService must not be set. Conversely if defaultService is set, defaultRouteAction cannot contain any weightedBackendServices. Only one of defaultRouteAction or defaultUrlRedirect must be set. URL maps for Classic external HTTP(S) load balancers only support the urlRewrite action within defaultRouteAction. defaultRouteAction has no effect when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true." + "description": "defaultRouteAction takes effect when none of the hostRules match. The load balancer performs advanced routing actions, such as URL rewrites and header transformations, before forwarding the request to the selected backend. 
If defaultRouteAction specifies any weightedBackendServices, defaultService must not be set. Conversely if defaultService is set, defaultRouteAction cannot contain any weightedBackendServices. Only one of defaultRouteAction or defaultUrlRedirect must be set. URL maps for classic Application Load Balancers only support the urlRewrite action within defaultRouteAction. defaultRouteAction has no effect when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true." }, "defaultService": { "description": "The full or partial URL of the defaultService resource to which traffic is directed if none of the hostRules match. If defaultRouteAction is also specified, advanced routing actions, such as URL rewrites, take effect before sending the request to the backend. However, if defaultService is specified, defaultRouteAction cannot contain any weightedBackendServices. Conversely, if routeAction specifies any weightedBackendServices, service must not be specified. Only one of defaultService, defaultUrlRedirect , or defaultRouteAction.weightedBackendService must be set. defaultService has no effect when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true.", @@ -74160,7 +76626,7 @@ "id": "UrlMapsValidateRequest", "properties": { "loadBalancingSchemes": { - "description": "Specifies the load balancer type(s) this validation request is for. Use EXTERNAL_MANAGED for HTTP/HTTPS External Global Load Balancer with Advanced Traffic Management. Use EXTERNAL for Classic HTTP/HTTPS External Global Load Balancer. Other load balancer types are not supported. For more information, refer to Choosing a load balancer. If unspecified, the load balancing scheme will be inferred from the backend service resources this URL map references. If that can not be inferred (for example, this URL map only references backend buckets, or this Url map is for rewrites and redirects only and doesn't reference any backends), EXTERNAL will be used as the default type. If specified, the scheme(s) must not conflict with the load balancing scheme of the backend service resources this Url map references.", + "description": "Specifies the load balancer type(s) this validation request is for. Use EXTERNAL_MANAGED for global external Application Load Balancers and regional external Application Load Balancers. Use EXTERNAL for classic Application Load Balancers. Use INTERNAL_MANAGED for internal Application Load Balancers. For more information, refer to Choosing a load balancer. If unspecified, the load balancing scheme will be inferred from the backend service resources this URL map references. If that can not be inferred (for example, this URL map only references backend buckets, or this Url map is for rewrites and redirects only and doesn't reference any backends), EXTERNAL will be used as the default type. If specified, the scheme(s) must not conflict with the load balancing scheme of the backend service resources this Url map references.", "items": { "enum": [ "EXTERNAL", @@ -74168,9 +76634,9 @@ "LOAD_BALANCING_SCHEME_UNSPECIFIED" ], "enumDescriptions": [ - "Signifies that this will be used for Classic L7 External Load Balancing.", - "Signifies that this will be used for Envoy-based L7 External Load Balancing.", - "If unspecified, the validation will try to infer the scheme from the backend service resources this Url map references. If the inferrence is not possible, EXTERNAL will be used as the default type." 
+ "Signifies that this will be used for classic Application Load Balancers.", + "Signifies that this will be used for Envoy-based global external Application Load Balancers.", + "If unspecified, the validation will try to infer the scheme from the backend service resources this Url map references. If the inference is not possible, EXTERNAL will be used as the default type." ], "type": "string" }, @@ -74246,6 +76712,7 @@ "purpose": { "description": "The purpose of the resource. This field can be either PRIVATE, REGIONAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT, or INTERNAL_HTTPS_LOAD_BALANCER. PRIVATE is the default purpose for user-created subnets or subnets that are automatically created in auto mode networks. A subnet with purpose set to REGIONAL_MANAGED_PROXY is a user-created subnetwork that is reserved for regional Envoy-based load balancers. A subnet with purpose set to PRIVATE_SERVICE_CONNECT is used to publish services using Private Service Connect. A subnet with purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a proxy-only subnet that can be used only by regional internal HTTP(S) load balancers. Note that REGIONAL_MANAGED_PROXY is the preferred setting for all regional Envoy load balancers. If unspecified, the subnet purpose defaults to PRIVATE. The enableFlowLogs field isn't supported if the subnet purpose field is set to REGIONAL_MANAGED_PROXY.", "enum": [ + "GLOBAL_MANAGED_PROXY", "INTERNAL_HTTPS_LOAD_BALANCER", "PRIVATE", "PRIVATE_RFC_1918", @@ -74253,11 +76720,12 @@ "REGIONAL_MANAGED_PROXY" ], "enumDescriptions": [ + "Subnet reserved for Global Envoy-based Load Balancing.", "Subnet reserved for Internal HTTP(S) Load Balancing.", "Regular user created or automatically created subnet.", "Regular user created or automatically created subnet.", "Subnetworks created for Private Service Connect in the producer network.", - "Subnetwork used for Regional Internal/External HTTP(S) Load Balancing." + "Subnetwork used for Regional Envoy-based Load Balancing." ], "type": "string" }, @@ -75447,7 +77915,7 @@ "type": "integer" }, "peerGcpGateway": { - "description": "URL of the peer side HA GCP VPN gateway to which this VPN tunnel is connected. Provided by the client when the VPN tunnel is created. This field can be used when creating highly available VPN from VPC network to VPC network, the field is exclusive with the field peerExternalGateway. If provided, the VPN tunnel will automatically use the same vpnGatewayInterface ID in the peer GCP VPN gateway.", + "description": "URL of the peer side HA VPN gateway to which this VPN tunnel is connected. Provided by the client when the VPN tunnel is created. This field can be used when creating highly available VPN from VPC network to VPC network, the field is exclusive with the field peerExternalGateway. If provided, the VPN tunnel will automatically use the same vpnGatewayInterface ID in the peer Google Cloud VPN gateway.", "type": "string" }, "peerIp": { diff --git a/vendor/google.golang.org/api/compute/v1/compute-gen.go b/vendor/google.golang.org/api/compute/v1/compute-gen.go index 811675da12d41..06d94490af036 100644 --- a/vendor/google.golang.org/api/compute/v1/compute-gen.go +++ b/vendor/google.golang.org/api/compute/v1/compute-gen.go @@ -8,6 +8,17 @@ // // For product documentation, see: https://cloud.google.com/compute/ // +// # Library status +// +// These client libraries are officially supported by Google. However, this +// library is considered complete and is in maintenance mode. 
This means +// that we will address critical bugs and security issues but will not add +// any new features. +// +// When possible, we recommend using our newer +// [Cloud Client Libraries for Go](https://pkg.go.dev/cloud.google.com/go) +// that are still actively being worked and iterated on. +// // # Creating a client // // Usage example: @@ -17,28 +28,31 @@ // ctx := context.Background() // computeService, err := compute.NewService(ctx) // -// In this example, Google Application Default Credentials are used for authentication. -// -// For information on how to create and obtain Application Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials. +// In this example, Google Application Default Credentials are used for +// authentication. For information on how to create and obtain Application +// Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials. // // # Other authentication options // -// By default, all available scopes (see "Constants") are used to authenticate. To restrict scopes, use option.WithScopes: +// By default, all available scopes (see "Constants") are used to authenticate. +// To restrict scopes, use [google.golang.org/api/option.WithScopes]: // // computeService, err := compute.NewService(ctx, option.WithScopes(compute.DevstorageReadWriteScope)) // -// To use an API key for authentication (note: some APIs do not support API keys), use option.WithAPIKey: +// To use an API key for authentication (note: some APIs do not support API +// keys), use [google.golang.org/api/option.WithAPIKey]: // // computeService, err := compute.NewService(ctx, option.WithAPIKey("AIza...")) // -// To use an OAuth token (e.g., a user token obtained via a three-legged OAuth flow), use option.WithTokenSource: +// To use an OAuth token (e.g., a user token obtained via a three-legged OAuth +// flow, use [google.golang.org/api/option.WithTokenSource]: // // config := &oauth2.Config{...} // // ... // token, err := config.Exchange(ctx, ...) // computeService, err := compute.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token))) // -// See https://godoc.org/google.golang.org/api/option/ for details on options. +// See [google.golang.org/api/option.ClientOption] for details on options. 
package compute // import "google.golang.org/api/compute/v1" import ( @@ -220,6 +234,7 @@ func New(client *http.Client) (*Service, error) { s.Routes = NewRoutesService(s) s.SecurityPolicies = NewSecurityPoliciesService(s) s.ServiceAttachments = NewServiceAttachmentsService(s) + s.SnapshotSettings = NewSnapshotSettingsService(s) s.Snapshots = NewSnapshotsService(s) s.SslCertificates = NewSslCertificatesService(s) s.SslPolicies = NewSslPoliciesService(s) @@ -395,6 +410,8 @@ type Service struct { ServiceAttachments *ServiceAttachmentsService + SnapshotSettings *SnapshotSettingsService + Snapshots *SnapshotsService SslCertificates *SslCertificatesService @@ -1112,6 +1129,15 @@ type ServiceAttachmentsService struct { s *Service } +func NewSnapshotSettingsService(s *Service) *SnapshotSettingsService { + rs := &SnapshotSettingsService{s: s} + return rs +} + +type SnapshotSettingsService struct { + s *Service +} + func NewSnapshotsService(s *Service) *SnapshotsService { rs := &SnapshotsService{s: s} return rs @@ -1265,6 +1291,55 @@ type ZonesService struct { s *Service } +// AWSV4Signature: Contains the configurations necessary to generate a +// signature for access to private storage buckets that support +// Signature Version 4 for authentication. The service name for +// generating the authentication header will always default to 's3'. +type AWSV4Signature struct { + // AccessKey: The access key used for s3 bucket authentication. Required + // for updating or creating a backend that uses AWS v4 signature + // authentication, but will not be returned as part of the configuration + // when queried with a REST API GET request. @InputOnly + AccessKey string `json:"accessKey,omitempty"` + + // AccessKeyId: The identifier of an access key used for s3 bucket + // authentication. + AccessKeyId string `json:"accessKeyId,omitempty"` + + // AccessKeyVersion: The optional version identifier for the access key. + // You can use this to keep track of different iterations of your access + // key. + AccessKeyVersion string `json:"accessKeyVersion,omitempty"` + + // OriginRegion: The name of the cloud region of your origin. This is a + // free-form field with the name of the region your cloud uses to host + // your origin. For example, "us-east-1" for AWS or "us-ashburn-1" for + // OCI. + OriginRegion string `json:"originRegion,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AccessKey") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AccessKey") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *AWSV4Signature) MarshalJSON() ([]byte, error) { + type NoMethod AWSV4Signature + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // AcceleratorConfig: A specification of the type and number of // accelerator cards attached to the instance. type AcceleratorConfig struct { @@ -1996,6 +2071,10 @@ type AccessConfig struct { // external IPv6 range. PublicPtrDomainName string `json:"publicPtrDomainName,omitempty"` + // SecurityPolicy: [Output Only] The resource URL for the security + // policy associated with this access config. + SecurityPolicy string `json:"securityPolicy,omitempty"` + // SetPublicPtr: Specifies whether a public DNS 'PTR' record should be // created to map the external IP address of the instance to a DNS // domain name. This field is not used in ipv6AccessConfig. A default @@ -3331,8 +3410,7 @@ type AttachedDiskInitializeParams struct { // ReplicaZones: Required for each regional disk associated with the // instance. Specify the URLs of the zones where the disk should be // replicated to. You must provide exactly two replica zones, and one - // zone must be the same as the instance zone. You can't use this option - // with boot disks. + // zone must be the same as the instance zone. ReplicaZones []string `json:"replicaZones,omitempty"` // ResourceManagerTags: Resource manager tags to be bound to the disk. @@ -4737,8 +4815,8 @@ type AutoscalingPolicyScalingSchedule struct { // TimeZone: The time zone to use when interpreting the schedule. The // value of this field must be a time zone name from the tz database: - // http://en.wikipedia.org/wiki/Tz_database. This field is assigned a - // default value of “UTC” if left empty. + // https://en.wikipedia.org/wiki/Tz_database. This field is assigned a + // default value of "UTC" if left empty. TimeZone string `json:"timeZone,omitempty"` // ForceSendFields is a list of field names (e.g. "Description") to @@ -5626,12 +5704,12 @@ type BackendService struct { // either: - A regional backend service with the service_protocol set to // HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to // INTERNAL_MANAGED. - A global backend service with the - // load_balancing_scheme set to INTERNAL_SELF_MANAGED. If - // sessionAffinity is not NONE, and this field is not set to MAGLEV or - // RING_HASH, session affinity settings will not take effect. Only - // ROUND_ROBIN and RING_HASH are supported when the backend service is - // referenced by a URL map that is bound to target gRPC proxy that has - // validateForProxyless field set to true. + // load_balancing_scheme set to INTERNAL_SELF_MANAGED, INTERNAL_MANAGED, + // or EXTERNAL_MANAGED. If sessionAffinity is not NONE, and this field + // is not set to MAGLEV or RING_HASH, session affinity settings will not + // take effect. Only ROUND_ROBIN and RING_HASH are supported when the + // backend service is referenced by a URL map that is bound to target + // gRPC proxy that has validateForProxyless field set to true. // // Possible values: // "INVALID_LB_POLICY" @@ -5834,6 +5912,8 @@ type BackendService struct { // use maxStreamDuration. TimeoutSec int64 `json:"timeoutSec,omitempty"` + UsedBy []*BackendServiceUsedBy `json:"usedBy,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. 
googleapi.ServerResponse `json:"-"` @@ -6499,8 +6579,7 @@ func (s *BackendServiceGroupHealth) MarshalJSON() ([]byte, error) { // BackendServiceIAP: Identity-Aware Proxy type BackendServiceIAP struct { // Enabled: Whether the serving infrastructure will authenticate and - // authorize all incoming requests. If true, the oauth2ClientId and - // oauth2ClientSecret fields must be non-empty. + // authorize all incoming requests. Enabled bool `json:"enabled,omitempty"` // Oauth2ClientId: OAuth2 client ID to use for the authentication flow. @@ -6732,6 +6811,201 @@ func (s *BackendServiceListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// BackendServiceListUsable: Contains a list of usable BackendService +// resources. +type BackendServiceListUsable struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of BackendService resources. + Items []*BackendService `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always + // compute#usableBackendServiceList for lists of usable backend + // services. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *BackendServiceListUsableWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BackendServiceListUsable) MarshalJSON() ([]byte, error) { + type NoMethod BackendServiceListUsable + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BackendServiceListUsableWarning: [Output Only] Informational warning +// message. +type BackendServiceListUsableWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. 
+ // + // Possible values: + // "CLEANUP_FAILED" - Warning about failed cleanup of transient + // changes made by a failed operation. + // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was + // created. + // "DEPRECATED_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as deprecated + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk + // that is larger than image size. + // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as experimental + // "EXTERNAL_API_WARNING" - Warning that is present in an external api + // call + // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been + // overridden. Deprecated unused field. + // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an + // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. + // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a + // exceedingly large number of resources + // "LIST_OVERHEAD_QUOTA_EXCEED" - Resource can't be retrieved due to + // list overhead quota exceed which captures the amount of resources + // filtered out by user-defined list filter. + // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is + // not assigned to an instance on the network. + // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot + // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. + // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL + // refers to an instance that does not exist. + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance + // URL refers to an instance that is not on the same network as the + // route. + // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not + // have a status of RUNNING. + // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to + // continue the process despite the mentioned error. + // "NO_RESULTS_ON_PAGE" - No results are present on a particular list + // page. + // "PARTIAL_SUCCESS" - Success is reported, but some results may be + // missing due to errors + // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource + // that requires a TOS they have not accepted. + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a + // resource is in use. + // "RESOURCE_NOT_DELETED" - One or more of the resources set to + // auto-delete could not be deleted because they were in use. + // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is + // ignored. + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in + // instance group manager is valid as such, but its application does not + // make a lot of sense, because it allows only single instance in + // instance group. + // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema + // are present + // "UNREACHABLE" - A given scope cannot be reached. + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. 
+ // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" + // } + Data []*BackendServiceListUsableWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BackendServiceListUsableWarning) MarshalJSON() ([]byte, error) { + type NoMethod BackendServiceListUsableWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type BackendServiceListUsableWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BackendServiceListUsableWarningData) MarshalJSON() ([]byte, error) { + type NoMethod BackendServiceListUsableWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // BackendServiceLocalityLoadBalancingPolicyConfig: Container for either // a built-in LB policy supported by gRPC or Envoy or a custom one // implemented by the end user. 
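The hunk above introduces the paginated BackendServiceListUsable response. As a rough illustration of how a caller would consume it, the minimal Go sketch below walks every page by following NextPageToken until it is empty. It assumes the matching generated call (BackendServices.ListUsable with the usual Context/PageToken/Do methods) exists elsewhere in this file, since only the response types appear in this hunk; the imports assumed are "context" and compute "google.golang.org/api/compute/v1".

// Sketch only: listUsableBackendServices collects every usable backend
// service in a project by paging through BackendServiceListUsable via
// NextPageToken. compute.NewService and the generated call pattern are
// assumed; BackendServices.ListUsable itself is not part of this hunk.
func listUsableBackendServices(ctx context.Context, project string) ([]*compute.BackendService, error) {
	svc, err := compute.NewService(ctx)
	if err != nil {
		return nil, err
	}

	var all []*compute.BackendService
	pageToken := ""
	for {
		call := svc.BackendServices.ListUsable(project).Context(ctx)
		if pageToken != "" {
			// Resume from the previous page, as described in the
			// NextPageToken doc comment above.
			call = call.PageToken(pageToken)
		}
		resp, err := call.Do()
		if err != nil {
			return nil, err
		}
		all = append(all, resp.Items...)
		if resp.NextPageToken == "" {
			return all, nil
		}
		pageToken = resp.NextPageToken
	}
}

The same NextPageToken convention applies to the other paginated list responses touched by this change.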
@@ -6965,6 +7239,32 @@ func (s *BackendServiceReference) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type BackendServiceUsedBy struct { + Reference string `json:"reference,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Reference") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Reference") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BackendServiceUsedBy) MarshalJSON() ([]byte, error) { + type NoMethod BackendServiceUsedBy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type BackendServicesScopedList struct { // BackendServices: A list of BackendServices contained in this scope. BackendServices []*BackendService `json:"backendServices,omitempty"` @@ -7581,11 +7881,15 @@ func (s *BulkInsertInstanceResource) MarshalJSON() ([]byte, error) { // properties to be set on individual instances. To be extended in the // future. type BulkInsertInstanceResourcePerInstanceProperties struct { + // Hostname: Specifies the hostname of the instance. More details in: + // https://cloud.google.com/compute/docs/instances/custom-hostname-vm#naming_convention + Hostname string `json:"hostname,omitempty"` + // Name: This field is only temporary. It will be removed. Do not use // it. Name string `json:"name,omitempty"` - // ForceSendFields is a list of field names (e.g. "Name") to + // ForceSendFields is a list of field names (e.g. "Hostname") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -7593,8 +7897,8 @@ type BulkInsertInstanceResourcePerInstanceProperties struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Name") to include in API - // requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Hostname") to include in + // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -7608,6 +7912,57 @@ func (s *BulkInsertInstanceResourcePerInstanceProperties) MarshalJSON() ([]byte, return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type BulkInsertOperationStatus struct { + // CreatedVmCount: [Output Only] Count of VMs successfully created so + // far. 
+ CreatedVmCount int64 `json:"createdVmCount,omitempty"` + + // DeletedVmCount: [Output Only] Count of VMs that got deleted during + // rollback. + DeletedVmCount int64 `json:"deletedVmCount,omitempty"` + + // FailedToCreateVmCount: [Output Only] Count of VMs that started + // creating but encountered an error. + FailedToCreateVmCount int64 `json:"failedToCreateVmCount,omitempty"` + + // Status: [Output Only] Creation status of BulkInsert operation - + // information if the flow is rolling forward or rolling back. + // + // Possible values: + // "CREATING" - Rolling forward - creating VMs. + // "DONE" - Done + // "ROLLING_BACK" - Rolling back - cleaning up after an error. + // "STATUS_UNSPECIFIED" + Status string `json:"status,omitempty"` + + // TargetVmCount: [Output Only] Count of VMs originally planned to be + // created. + TargetVmCount int64 `json:"targetVmCount,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CreatedVmCount") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreatedVmCount") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *BulkInsertOperationStatus) MarshalJSON() ([]byte, error) { + type NoMethod BulkInsertOperationStatus + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type CacheInvalidationRule struct { // Host: If set, this invalidation rule will only apply to requests with // a Host header matching host. @@ -7836,7 +8191,8 @@ type Commitment struct { // used. Region string `json:"region,omitempty"` - // Reservations: List of reservations in this commitment. + // Reservations: List of create-on-create reseravtions for this + // commitment. Reservations []*Reservation `json:"reservations,omitempty"` // Resources: A list of commitment amounts for particular resources. @@ -7879,9 +8235,12 @@ type Commitment struct { // // Possible values: // "ACCELERATOR_OPTIMIZED" + // "ACCELERATOR_OPTIMIZED_A3" // "COMPUTE_OPTIMIZED" // "COMPUTE_OPTIMIZED_C2D" // "COMPUTE_OPTIMIZED_C3" + // "COMPUTE_OPTIMIZED_C3D" + // "COMPUTE_OPTIMIZED_H3" // "GENERAL_PURPOSE" // "GENERAL_PURPOSE_E2" // "GENERAL_PURPOSE_N2" @@ -9093,7 +9452,7 @@ type Disk struct { // sourceSnapshot, or sourceDisk parameter, or specify it alone to // create an empty persistent disk. If you specify this field along with // a source, the value of sizeGb must not be less than the size of the - // source. Acceptable values are 1 to 65536, inclusive. + // source. Acceptable values are greater than 0. SizeGb int64 `json:"sizeGb,omitempty,string"` // SourceConsistencyGroupPolicy: [Output Only] URL of the @@ -13146,13 +13505,14 @@ type ForwardingRule struct { // numbers and must start with a letter. Name string `json:"name,omitempty"` - // Network: This field is not used for external load balancing. 
For - // Internal TCP/UDP Load Balancing, this field identifies the network - // that the load balanced IP should belong to for this Forwarding Rule. - // If the subnetwork is specified, the network of the subnetwork will be - // used. If neither subnetwork nor this field is specified, the default - // network will be used. For Private Service Connect forwarding rules - // that forward traffic to Google APIs, a network must be provided. + // Network: This field is not used for global external load balancing. + // For Internal TCP/UDP Load Balancing, this field identifies the + // network that the load balanced IP should belong to for this + // Forwarding Rule. If the subnetwork is specified, the network of the + // subnetwork will be used. If neither subnetwork nor this field is + // specified, the default network will be used. For Private Service + // Connect forwarding rules that forward traffic to Google APIs, a + // network must be provided. Network string `json:"network,omitempty"` // NetworkTier: This signifies the networking tier used for configuring @@ -13176,7 +13536,8 @@ type ForwardingRule struct { // NoAutomateDnsZone: This is used in PSC consumer ForwardingRule to // control whether it should try to auto-generate a DNS zone or not. - // Non-PSC forwarding rules do not use this field. + // Non-PSC forwarding rules do not use this field. Once set, this field + // is not mutable. NoAutomateDnsZone bool `json:"noAutomateDnsZone,omitempty"` // PortRange: This field can only be used: - If IPProtocol is one of @@ -13287,7 +13648,8 @@ type ForwardingRule struct { // vpc-sc - APIs that support VPC Service Controls. - all-apis - All // supported Google APIs. - For Private Service Connect forwarding rules // that forward traffic to managed services, the target must be a - // service attachment. + // service attachment. The target is not mutable once set as a service + // attachment. Target string `json:"target,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -14355,6 +14717,7 @@ type GuestOsFeature struct { // "SECURE_BOOT" // "SEV_CAPABLE" // "SEV_LIVE_MIGRATABLE" + // "SEV_LIVE_MIGRATABLE_V2" // "SEV_SNP_CAPABLE" // "UEFI_COMPATIBLE" // "VIRTIO_SCSI_MULTIQUEUE" @@ -14666,23 +15029,25 @@ func (s *HTTPSHealthCheck) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// HealthCheck: Represents a Health Check resource. Google Compute -// Engine has two Health Check resources: * Global -// (/compute/docs/reference/rest/v1/healthChecks) * Regional -// (/compute/docs/reference/rest/v1/regionHealthChecks) Internal HTTP(S) -// load balancers must use regional health checks -// (`compute.v1.regionHealthChecks`). Traffic Director must use global -// health checks (`compute.v1.healthChecks`). Internal TCP/UDP load -// balancers can use either regional or global health checks -// (`compute.v1.regionHealthChecks` or `compute.v1.healthChecks`). -// External HTTP(S), TCP proxy, and SSL proxy load balancers as well as -// managed instance group auto-healing must use global health checks -// (`compute.v1.healthChecks`). Backend service-based network load -// balancers must use regional health checks -// (`compute.v1.regionHealthChecks`). Target pool-based network load -// balancers must use legacy HTTP health checks -// (`compute.v1.httpHealthChecks`). For more information, see Health -// checks overview. +// HealthCheck: Represents a health check resource. 
Google Compute +// Engine has two health check resources: * Regional +// (/compute/docs/reference/rest/v1/regionHealthChecks) * Global +// (/compute/docs/reference/rest/v1/healthChecks) These health check +// resources can be used for load balancing and for autohealing VMs in a +// managed instance group (MIG). **Load balancing** The following load +// balancer can use either regional or global health check: * Internal +// TCP/UDP load balancer The following load balancers require regional +// health check: * Internal HTTP(S) load balancer * Backend +// service-based network load balancer Traffic Director and the +// following load balancers require global health check: * External +// HTTP(S) load balancer * TCP proxy load balancer * SSL proxy load +// balancer The following load balancer require legacy HTTP health +// checks (/compute/docs/reference/rest/v1/httpHealthChecks): * Target +// pool-based network load balancer **Autohealing in MIGs** The health +// checks that you use for autohealing VMs in a MIG can be either +// regional or global. For more information, see Set up an application +// health check and autohealing. For more information, see Health checks +// overview. type HealthCheck struct { // CheckIntervalSec: How often (in seconds) to send a health check. The // default value is 5 seconds. @@ -16795,10 +17160,9 @@ type HttpRouteAction struct { // retry_policy is ignored by clients that are configured with a // fault_injection_policy if: 1. The traffic is generated by fault // injection AND 2. The fault injection is not a delay fault injection. - // Fault injection is not supported with the global external HTTP(S) - // load balancer (classic). To see which load balancers support fault - // injection, see Load balancing: Routing and traffic management - // features. + // Fault injection is not supported with the classic Application Load + // Balancer . To see which load balancers support fault injection, see + // Load balancing: Routing and traffic management features. FaultInjectionPolicy *HttpFaultInjection `json:"faultInjectionPolicy,omitempty"` // MaxStreamDuration: Specifies the maximum duration (timeout) for @@ -16837,9 +17201,9 @@ type HttpRouteAction struct { // UrlRewrite: The spec to modify the URL of the request, before // forwarding the request to the matched service. urlRewrite is the only - // action supported in UrlMaps for external HTTP(S) load balancers. Not - // supported when the URL map is bound to a target gRPC proxy that has - // the validateForProxyless field set to true. + // action supported in UrlMaps for classic Application Load Balancers. + // Not supported when the URL map is bound to a target gRPC proxy that + // has the validateForProxyless field set to true. UrlRewrite *UrlRewrite `json:"urlRewrite,omitempty"` // WeightedBackendServices: A list of weighted backend services to send @@ -16926,8 +17290,8 @@ type HttpRouteRule struct { // service must not be set. Conversely if service is set, routeAction // cannot contain any weightedBackendServices. Only one of urlRedirect, // service or routeAction.weightedBackendService must be set. URL maps - // for Classic external HTTP(S) load balancers only support the - // urlRewrite action within a route rule's routeAction. + // for classic Application Load Balancers only support the urlRewrite + // action within a route rule's routeAction. 
RouteAction *HttpRouteAction `json:"routeAction,omitempty"` // Service: The full or partial URL of the backend service resource to @@ -22026,9 +22390,16 @@ func (s *InstanceReference) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// InstanceTemplate: Represents an Instance Template resource. You can -// use instance templates to create VM instances and managed instance -// groups. For more information, read Instance Templates. +// InstanceTemplate: Represents an Instance Template resource. Google +// Compute Engine has two Instance Template resources: * Global +// (/compute/docs/reference/rest/v1/instanceTemplates) * Regional +// (/compute/docs/reference/rest/v1/regionInstanceTemplates) You can +// reuse a global instance template in different regions whereas you can +// use a regional instance template in a specified region only. If you +// want to reduce cross-region dependency or achieve data residency, use +// a regional instance template. To create VMs, managed instance groups, +// and reservations, you can use either global or regional instance +// templates. For more information, read Instance Templates. type InstanceTemplate struct { // CreationTimestamp: [Output Only] The creation timestamp for this // instance template in RFC3339 text format. @@ -22743,6 +23114,35 @@ func (s *InstancesAddResourcePoliciesRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type InstancesBulkInsertOperationMetadata struct { + // PerLocationStatus: Status information per location (location name is + // key). Example key: zones/us-central1-a + PerLocationStatus map[string]BulkInsertOperationStatus `json:"perLocationStatus,omitempty"` + + // ForceSendFields is a list of field names (e.g. "PerLocationStatus") + // to unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "PerLocationStatus") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *InstancesBulkInsertOperationMetadata) MarshalJSON() ([]byte, error) { + type NoMethod InstancesBulkInsertOperationMetadata + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type InstancesGetEffectiveFirewallsResponse struct { // FirewallPolicys: Effective firewalls from firewall policies. FirewallPolicys []*InstancesGetEffectiveFirewallsResponseEffectiveFirewallPolicy `json:"firewallPolicys,omitempty"` @@ -23175,6 +23575,42 @@ func (s *InstancesSetNameRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type InstancesSetSecurityPolicyRequest struct { + // NetworkInterfaces: The network interfaces that the security policy + // will be applied to. Network interfaces use the nicN naming format. 
+ // You can only set a security policy for network interfaces with an + // access config. + NetworkInterfaces []string `json:"networkInterfaces,omitempty"` + + // SecurityPolicy: A full or partial URL to a security policy to add to + // this instance. If this field is set to an empty string it will remove + // the associated security policy. + SecurityPolicy string `json:"securityPolicy,omitempty"` + + // ForceSendFields is a list of field names (e.g. "NetworkInterfaces") + // to unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NetworkInterfaces") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *InstancesSetSecurityPolicyRequest) MarshalJSON() ([]byte, error) { + type NoMethod InstancesSetSecurityPolicyRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type InstancesSetServiceAccountRequest struct { // Email: Email address of the service account. Email string `json:"email,omitempty"` @@ -23283,6 +23719,17 @@ type Interconnect struct { // set to true. AdminEnabled bool `json:"adminEnabled,omitempty"` + // AvailableFeatures: [Output only] List of features available for this + // Interconnect connection, which can take one of the following values: + // - MACSEC If present then the Interconnect connection is provisioned + // on MACsec capable hardware ports. If not present then the + // Interconnect connection is provisioned on non-MACsec capable ports + // and MACsec isn't supported and enabling MACsec fails. + // + // Possible values: + // "IF_MACSEC" - Media Access Control security (MACsec) + AvailableFeatures []string `json:"availableFeatures,omitempty"` + // CircuitInfos: [Output Only] A list of CircuitInfo objects, that // describe the individual circuits in this LAG. CircuitInfos []*InterconnectCircuitInfo `json:"circuitInfos,omitempty"` @@ -23370,6 +23817,16 @@ type Interconnect struct { // where this connection is to be provisioned. Location string `json:"location,omitempty"` + // Macsec: Configuration that enables Media Access Control security + // (MACsec) on the Cloud Interconnect connection between Google and your + // on-premises router. + Macsec *InterconnectMacsec `json:"macsec,omitempty"` + + // MacsecEnabled: Enable or disable MACsec on this Interconnect + // connection. MACsec enablement fails if the MACsec object is not + // specified. + MacsecEnabled bool `json:"macsecEnabled,omitempty"` + // Name: Name of the resource. Provided by the client when the resource // is created. The name must be 1-63 characters long, and comply with // RFC1035. Specifically, the name must be 1-63 characters long and @@ -23419,6 +23876,18 @@ type Interconnect struct { // the interconnect is connected to. RemoteLocation string `json:"remoteLocation,omitempty"` + // RequestedFeatures: Optional. 
List of features requested for this + // Interconnect connection, which can take one of the following values: + // - MACSEC If specified then the connection is created on MACsec + // capable hardware ports. If not specified, the default value is false, + // which allocates non-MACsec capable ports first if available. This + // parameter can be provided only with Interconnect INSERT. It isn't + // valid for Interconnect PATCH. + // + // Possible values: + // "IF_MACSEC" - Media Access Control security (MACsec) + RequestedFeatures []string `json:"requestedFeatures,omitempty"` + // RequestedLinkCount: Target number of physical links in the link // bundle, as requested by the customer. RequestedLinkCount int64 `json:"requestedLinkCount,omitempty"` @@ -23682,8 +24151,8 @@ type InterconnectAttachment struct { // PairingKey: [Output only for type PARTNER. Input only for // PARTNER_PROVIDER. Not present for DEDICATED]. The opaque identifier - // of an PARTNER attachment used to initiate provisioning with a - // selected partner. Of the form "XXXXX/region/domain" + // of a PARTNER attachment used to initiate provisioning with a selected + // partner. Of the form "XXXXX/region/domain" PairingKey string `json:"pairingKey,omitempty"` // PartnerAsn: Optional BGP ASN for the router supplied by a Layer 3 @@ -23693,8 +24162,8 @@ type InterconnectAttachment struct { PartnerAsn int64 `json:"partnerAsn,omitempty,string"` // PartnerMetadata: Informational metadata about Partner attachments - // from Partners to display to customers. Output only for for PARTNER - // type, mutable for PARTNER_PROVIDER, not available for DEDICATED. + // from Partners to display to customers. Output only for PARTNER type, + // mutable for PARTNER_PROVIDER, not available for DEDICATED. PartnerMetadata *InterconnectAttachmentPartnerMetadata `json:"partnerMetadata,omitempty"` // PrivateInterconnectInfo: [Output Only] Information specific to an @@ -24815,6 +25284,9 @@ type InterconnectDiagnosticsLinkStatus struct { LacpStatus *InterconnectDiagnosticsLinkLACPStatus `json:"lacpStatus,omitempty"` + // Macsec: Describes the status of MACsec encryption on this link. + Macsec *InterconnectDiagnosticsMacsecStatus `json:"macsec,omitempty"` + // OperationalStatus: The operational status of the link. // // Possible values: @@ -24857,6 +25329,40 @@ func (s *InterconnectDiagnosticsLinkStatus) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// InterconnectDiagnosticsMacsecStatus: Describes the status of MACsec +// encryption on the link. +type InterconnectDiagnosticsMacsecStatus struct { + // Ckn: Indicates the Connectivity Association Key Name (CKN) currently + // being used if MACsec is operational. + Ckn string `json:"ckn,omitempty"` + + // Operational: Indicates whether or not MACsec is operational on this + // link. + Operational bool `json:"operational,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Ckn") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Ckn") to include in API + // requests with the JSON null value. 
By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectDiagnosticsMacsecStatus) MarshalJSON() ([]byte, error) { + type NoMethod InterconnectDiagnosticsMacsecStatus + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // InterconnectList: Response to the list request, and contains a list // of interconnects. type InterconnectList struct { @@ -25065,6 +25571,24 @@ type InterconnectLocation struct { // zone. Example: "zone1" or "zone2". AvailabilityZone string `json:"availabilityZone,omitempty"` + // AvailableFeatures: [Output only] List of features available at this + // InterconnectLocation, which can take one of the following values: - + // MACSEC + // + // Possible values: + // "IF_MACSEC" - Media Access Control security (MACsec) + AvailableFeatures []string `json:"availableFeatures,omitempty"` + + // AvailableLinkTypes: [Output only] List of link types available at + // this InterconnectLocation, which can take one of the following + // values: - LINK_TYPE_ETHERNET_10G_LR - LINK_TYPE_ETHERNET_100G_LR + // + // Possible values: + // "LINK_TYPE_ETHERNET_100G_LR" - 100G Ethernet, LR Optics. + // "LINK_TYPE_ETHERNET_10G_LR" - 10G Ethernet, LR Optics. [(rate_bps) + // = 10000000000]; + AvailableLinkTypes []string `json:"availableLinkTypes,omitempty"` + // City: [Output Only] Metropolitan area designator that indicates which // city an interconnect is located. For example: "Chicago, IL", // "Amsterdam, Netherlands". @@ -25409,6 +25933,167 @@ func (s *InterconnectLocationRegionInfo) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// InterconnectMacsec: Configuration information for enabling Media +// Access Control security (MACsec) on this Cloud Interconnect +// connection between Google and your on-premises router. +type InterconnectMacsec struct { + // FailOpen: If set to true, the Interconnect connection is configured + // with a should-secure MACsec security policy, that allows the Google + // router to fallback to cleartext traffic if the MKA session cannot be + // established. By default, the Interconnect connection is configured + // with a must-secure security policy that drops all traffic if the MKA + // session cannot be established with your router. + FailOpen bool `json:"failOpen,omitempty"` + + // PreSharedKeys: Required. A keychain placeholder describing a set of + // named key objects along with their start times. A MACsec CKN/CAK is + // generated for each key in the key chain. Google router automatically + // picks the key with the most recent startTime when establishing or + // re-establishing a MACsec secure link. + PreSharedKeys []*InterconnectMacsecPreSharedKey `json:"preSharedKeys,omitempty"` + + // ForceSendFields is a list of field names (e.g. "FailOpen") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "FailOpen") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectMacsec) MarshalJSON() ([]byte, error) { + type NoMethod InterconnectMacsec + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// InterconnectMacsecConfig: MACsec configuration information for the +// Interconnect connection. Contains the generated Connectivity +// Association Key Name (CKN) and the key (CAK) for this Interconnect +// connection. +type InterconnectMacsecConfig struct { + // PreSharedKeys: A keychain placeholder describing a set of named key + // objects along with their start times. A MACsec CKN/CAK is generated + // for each key in the key chain. Google router automatically picks the + // key with the most recent startTime when establishing or + // re-establishing a MACsec secure link. + PreSharedKeys []*InterconnectMacsecConfigPreSharedKey `json:"preSharedKeys,omitempty"` + + // ForceSendFields is a list of field names (e.g. "PreSharedKeys") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "PreSharedKeys") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectMacsecConfig) MarshalJSON() ([]byte, error) { + type NoMethod InterconnectMacsecConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// InterconnectMacsecConfigPreSharedKey: Describes a pre-shared key used +// to setup MACsec in static connectivity association key (CAK) mode. +type InterconnectMacsecConfigPreSharedKey struct { + // Cak: An auto-generated Connectivity Association Key (CAK) for this + // key. + Cak string `json:"cak,omitempty"` + + // Ckn: An auto-generated Connectivity Association Key Name (CKN) for + // this key. + Ckn string `json:"ckn,omitempty"` + + // Name: User provided name for this pre-shared key. + Name string `json:"name,omitempty"` + + // StartTime: User provided timestamp on or after which this key is + // valid. + StartTime string `json:"startTime,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Cak") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. 
+ // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Cak") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectMacsecConfigPreSharedKey) MarshalJSON() ([]byte, error) { + type NoMethod InterconnectMacsecConfigPreSharedKey + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// InterconnectMacsecPreSharedKey: Describes a pre-shared key used to +// setup MACsec in static connectivity association key (CAK) mode. +type InterconnectMacsecPreSharedKey struct { + // Name: Required. A name for this pre-shared key. The name must be 1-63 + // characters long, and comply with RFC1035. Specifically, the name must + // be 1-63 characters long and match the regular expression + // `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be + // a lowercase letter, and all following characters must be a dash, + // lowercase letter, or digit, except the last character, which cannot + // be a dash. + Name string `json:"name,omitempty"` + + // StartTime: A RFC3339 timestamp on or after which the key is valid. + // startTime can be in the future. If the keychain has a single key, + // startTime can be omitted. If the keychain has multiple keys, + // startTime is mandatory for each key. The start times of keys must be + // in increasing order. The start times of two consecutive keys must be + // at least 6 hours apart. + StartTime string `json:"startTime,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Name") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Name") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectMacsecPreSharedKey) MarshalJSON() ([]byte, error) { + type NoMethod InterconnectMacsecPreSharedKey + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // InterconnectOutageNotification: Description of a planned outage on // this Interconnect. type InterconnectOutageNotification struct { @@ -26008,6 +26693,41 @@ func (s *InterconnectsGetDiagnosticsResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// InterconnectsGetMacsecConfigResponse: Response for the +// InterconnectsGetMacsecConfigRequest. 
+type InterconnectsGetMacsecConfigResponse struct { + // Etag: end_interface: MixerGetResponseWithEtagBuilder + Etag string `json:"etag,omitempty"` + + Result *InterconnectMacsecConfig `json:"result,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Etag") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Etag") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectsGetMacsecConfigResponse) MarshalJSON() ([]byte, error) { + type NoMethod InterconnectsGetMacsecConfigResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // License: Represents a License resource. A License represents billing // and aggregate usage data for public and marketplace images. *Caution* // This resource is intended for use only by third-party partners who @@ -27940,6 +28660,10 @@ type ManagedInstance struct { // create or delete the instance. LastAttempt *ManagedInstanceLastAttempt `json:"lastAttempt,omitempty"` + // Name: [Output Only] The name of the instance. The name always exists + // even if the instance has not yet been created. + Name string `json:"name,omitempty"` + // PreservedStateFromConfig: [Output Only] Preserved state applied from // per-instance config for this instance. PreservedStateFromConfig *PreservedState `json:"preservedStateFromConfig,omitempty"` @@ -28394,6 +29118,113 @@ func (s *NamedPort) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// NatIpInfo: Contains NAT IP information of a NAT config (i.e. usage +// status, mode). +type NatIpInfo struct { + // NatIpInfoMappings: A list of all NAT IPs assigned to this NAT config. + NatIpInfoMappings []*NatIpInfoNatIpInfoMapping `json:"natIpInfoMappings,omitempty"` + + // NatName: Name of the NAT config which the NAT IP belongs to. + NatName string `json:"natName,omitempty"` + + // ForceSendFields is a list of field names (e.g. "NatIpInfoMappings") + // to unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NatIpInfoMappings") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. 
It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *NatIpInfo) MarshalJSON() ([]byte, error) { + type NoMethod NatIpInfo + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// NatIpInfoNatIpInfoMapping: Contains information of a NAT IP. +type NatIpInfoNatIpInfoMapping struct { + // Mode: Specifies whether NAT IP is auto or manual. + // + // Possible values: + // "AUTO" + // "MANUAL" + Mode string `json:"mode,omitempty"` + + // NatIp: NAT IP address. For example: 203.0.113.11. + NatIp string `json:"natIp,omitempty"` + + // Usage: Specifies whether NAT IP is currently serving at least one + // endpoint or not. + // + // Possible values: + // "IN_USE" + // "UNUSED" + Usage string `json:"usage,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Mode") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Mode") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *NatIpInfoNatIpInfoMapping) MarshalJSON() ([]byte, error) { + type NoMethod NatIpInfoNatIpInfoMapping + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type NatIpInfoResponse struct { + // Result: [Output Only] A list of NAT IP information. + Result []*NatIpInfo `json:"result,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Result") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Result") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *NatIpInfoResponse) MarshalJSON() ([]byte, error) { + type NoMethod NatIpInfoResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Network: Represents a VPC Network resource. Networks connect // resources to each other and to the internet. 
For more information, // read Virtual Private Cloud (VPC) Network. @@ -28827,6 +29658,11 @@ type NetworkAttachmentConnectedEndpoint struct { // interface. This value will be a range in case of Serverless. IpAddress string `json:"ipAddress,omitempty"` + // Ipv6Address: The IPv6 address assigned to the producer instance + // network interface. This is only assigned when the stack types of both + // the instance network interface and the consumer subnet are IPv4_IPv6. + Ipv6Address string `json:"ipv6Address,omitempty"` + // ProjectIdOrNum: The project id or number of the interface to which // the IP was assigned. ProjectIdOrNum string `json:"projectIdOrNum,omitempty"` @@ -28854,6 +29690,10 @@ type NetworkAttachmentConnectedEndpoint struct { // instance network interface. Subnetwork string `json:"subnetwork,omitempty"` + // SubnetworkCidrRange: [Output Only] The CIDR range of the subnet from + // which the IPv4 internal IP was allocated from. + SubnetworkCidrRange string `json:"subnetworkCidrRange,omitempty"` + // ForceSendFields is a list of field names (e.g. "IpAddress") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any @@ -29745,9 +30585,8 @@ func (s *NetworkEndpoint) MarshalJSON() ([]byte, error) { // NetworkEndpointGroup: Represents a collection of network endpoints. A // network endpoint group (NEG) defines how a set of endpoints should be // reached, whether they are reachable, and where they are located. For -// more information about using NEGs, see Setting up external HTTP(S) -// Load Balancing with internet NEGs, Setting up zonal NEGs, or Setting -// up external HTTP(S) Load Balancing with serverless NEGs. +// more information about using NEGs for different use cases, see +// Network endpoint groups overview. type NetworkEndpointGroup struct { // Annotations: Metadata defined as annotations on the network endpoint // group. @@ -34433,7 +35272,7 @@ func (s *NotificationEndpointListWarningData) MarshalJSON() ([]byte, error) { // regional or zonal. - For global operations, use the // `globalOperations` resource. - For regional operations, use the // `regionOperations` resource. - For zonal operations, use the -// `zonalOperations` resource. For more information, read Global, +// `zoneOperations` resource. For more information, read Global, // Regional, and Zonal Resources. type Operation struct { // ClientOperationId: [Output Only] The value of `requestId` if you @@ -34473,6 +35312,8 @@ type Operation struct { // This value is in RFC3339 text format. InsertTime string `json:"insertTime,omitempty"` + InstancesBulkInsertOperationMetadata *InstancesBulkInsertOperationMetadata `json:"instancesBulkInsertOperationMetadata,omitempty"` + // Kind: [Output Only] Type of the resource. Always `compute#operation` // for Operation resources. Kind string `json:"kind,omitempty"` @@ -34503,6 +35344,11 @@ type Operation struct { // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` + // SetCommonInstanceMetadataOperationMetadata: [Output Only] If the + // operation is for projects.setCommonInstanceMetadata, this field will + // contain information on all underlying zonal actions and their state. + SetCommonInstanceMetadataOperationMetadata *SetCommonInstanceMetadataOperationMetadata `json:"setCommonInstanceMetadataOperationMetadata,omitempty"` + // StartTime: [Output Only] The time that this operation was started by // the server. 
This value is in RFC3339 text format. StartTime string `json:"startTime,omitempty"` @@ -34530,7 +35376,8 @@ type Operation struct { TargetLink string `json:"targetLink,omitempty"` // User: [Output Only] User who requested the operation, for example: - // `user@example.com`. + // `user@example.com` or `alice_smith_identifier + // (global/workforcePools/example-com-us-employees)`. User string `json:"user,omitempty"` // Warnings: [Output Only] If warning messages are generated during @@ -36421,8 +37268,8 @@ type PathMatcher struct { // defaultService must not be set. Conversely if defaultService is set, // defaultRouteAction cannot contain any weightedBackendServices. Only // one of defaultRouteAction or defaultUrlRedirect must be set. URL maps - // for Classic external HTTP(S) load balancers only support the - // urlRewrite action within a path matcher's defaultRouteAction. + // for classic Application Load Balancers only support the urlRewrite + // action within a path matcher's defaultRouteAction. DefaultRouteAction *HttpRouteAction `json:"defaultRouteAction,omitempty"` // DefaultService: The full or partial URL to the BackendService @@ -36525,9 +37372,9 @@ type PathRule struct { // backend. If routeAction specifies any weightedBackendServices, // service must not be set. Conversely if service is set, routeAction // cannot contain any weightedBackendServices. Only one of routeAction - // or urlRedirect must be set. URL maps for Classic external HTTP(S) - // load balancers only support the urlRewrite action within a path - // rule's routeAction. + // or urlRedirect must be set. URL maps for classic Application Load + // Balancers only support the urlRewrite action within a path rule's + // routeAction. RouteAction *HttpRouteAction `json:"routeAction,omitempty"` // Service: The full or partial URL of the backend service resource to @@ -36648,7 +37495,7 @@ func (s *PerInstanceConfig) MarshalJSON() ([]byte, error) { // both. To learn which resources support conditions in their IAM // policies, see the IAM documentation // (https://cloud.google.com/iam/help/conditions/resource-policies). -// **JSON example:** { "bindings": [ { "role": +// **JSON example:** ``` { "bindings": [ { "role": // "roles/resourcemanager.organizationAdmin", "members": [ // "user:mike@example.com", "group:admins@example.com", // "domain:google.com", @@ -36657,17 +37504,17 @@ func (s *PerInstanceConfig) MarshalJSON() ([]byte, error) { // "user:eve@example.com" ], "condition": { "title": "expirable access", // "description": "Does not grant access after Sep 2020", "expression": // "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], -// "etag": "BwWWja0YfJA=", "version": 3 } **YAML example:** bindings: - -// members: - user:mike@example.com - group:admins@example.com - -// domain:google.com - +// "etag": "BwWWja0YfJA=", "version": 3 } ``` **YAML example:** ``` +// bindings: - members: - user:mike@example.com - +// group:admins@example.com - domain:google.com - // serviceAccount:my-project-id@appspot.gserviceaccount.com role: // roles/resourcemanager.organizationAdmin - members: - // user:eve@example.com role: roles/resourcemanager.organizationViewer // condition: title: expirable access description: Does not grant access // after Sep 2020 expression: request.time < // timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3 -// For a description of IAM and its features, see the IAM documentation -// (https://cloud.google.com/iam/docs/). 
+// ``` For a description of IAM and its features, see the IAM +// documentation (https://cloud.google.com/iam/docs/). type Policy struct { // AuditConfigs: Specifies cloud audit logging configuration for this // policy. @@ -36782,6 +37629,14 @@ type PreservedState struct { // with the device names of the disks. Disks map[string]PreservedStatePreservedDisk `json:"disks,omitempty"` + // ExternalIPs: Preserved external IPs defined for this instance. This + // map is keyed with the name of the network interface. + ExternalIPs map[string]PreservedStatePreservedNetworkIp `json:"externalIPs,omitempty"` + + // InternalIPs: Preserved internal IPs defined for this instance. This + // map is keyed with the name of the network interface. + InternalIPs map[string]PreservedStatePreservedNetworkIp `json:"internalIPs,omitempty"` + // Metadata: Preserved metadata defined for this instance. Metadata map[string]string `json:"metadata,omitempty"` @@ -36860,6 +37715,75 @@ func (s *PreservedStatePreservedDisk) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type PreservedStatePreservedNetworkIp struct { + // AutoDelete: These stateful IPs will never be released during + // autohealing, update or VM instance recreate operations. This flag is + // used to configure if the IP reservation should be deleted after it is + // no longer used by the group, e.g. when the given instance or the + // whole group is deleted. + // + // Possible values: + // "NEVER" + // "ON_PERMANENT_INSTANCE_DELETION" + AutoDelete string `json:"autoDelete,omitempty"` + + // IpAddress: Ip address representation + IpAddress *PreservedStatePreservedNetworkIpIpAddress `json:"ipAddress,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AutoDelete") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AutoDelete") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *PreservedStatePreservedNetworkIp) MarshalJSON() ([]byte, error) { + type NoMethod PreservedStatePreservedNetworkIp + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type PreservedStatePreservedNetworkIpIpAddress struct { + // Address: The URL of the reservation for this IP address. + Address string `json:"address,omitempty"` + + // Literal: An IPv4 internal network address to assign to the instance + // for this network interface. + Literal string `json:"literal,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Address") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. 
+ // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Address") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *PreservedStatePreservedNetworkIpIpAddress) MarshalJSON() ([]byte, error) { + type NoMethod PreservedStatePreservedNetworkIpIpAddress + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Project: Represents a Project resource. A project is used to organize // resources in a Google Cloud Platform environment. For more // information, read about the Resource Hierarchy. @@ -37140,6 +38064,18 @@ func (s *ProjectsSetDefaultNetworkTierRequest) MarshalJSON() ([]byte, error) { // IP prefix is a single unit of route advertisement and is announced // globally to the internet. type PublicAdvertisedPrefix struct { + // ByoipApiVersion: [Output Only] The version of BYOIP API. + // + // Possible values: + // "V1" - This public advertised prefix can be used to create both + // regional and global public delegated prefixes. It usually takes 4 + // weeks to create or delete a public delegated prefix. The BGP status + // cannot be changed. + // "V2" - This public advertised prefix can only be used to create + // regional public delegated prefixes. Public delegated prefix creation + // and deletion takes minutes and the BGP status can be modified. + ByoipApiVersion string `json:"byoipApiVersion,omitempty"` + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` @@ -37182,6 +38118,24 @@ type PublicAdvertisedPrefix struct { // last character, which cannot be a dash. Name string `json:"name,omitempty"` + // PdpScope: Specifies how child public delegated prefix will be scoped. + // It could be one of following values: - `REGIONAL`: The public + // delegated prefix is regional only. The provisioning will take a few + // minutes. - `GLOBAL`: The public delegated prefix is global only. The + // provisioning will take ~4 weeks. - `GLOBAL_AND_REGIONAL` [output + // only]: The public delegated prefixes is BYOIP V1 legacy prefix. This + // is output only value and no longer supported in BYOIP V2. + // + // Possible values: + // "GLOBAL" - The public delegated prefix is global only. The + // provisioning will take ~4 weeks. + // "GLOBAL_AND_REGIONAL" - The public delegated prefixes is BYOIP V1 + // legacy prefix. This is output only value and no longer supported in + // BYOIP V2. + // "REGIONAL" - The public delegated prefix is regional only. The + // provisioning will take a few minutes. + PdpScope string `json:"pdpScope,omitempty"` + // PublicDelegatedPrefixs: [Output Only] The list of public delegated // prefixes that exist for this public advertised prefix. PublicDelegatedPrefixs []*PublicAdvertisedPrefixPublicDelegatedPrefix `json:"publicDelegatedPrefixs,omitempty"` @@ -37203,12 +38157,15 @@ type PublicAdvertisedPrefix struct { // removed. // // Possible values: + // "ANNOUNCED_TO_INTERNET" - The prefix is announced to Internet. // "INITIAL" - RPKI validation is complete. 
// "PREFIX_CONFIGURATION_COMPLETE" - The prefix is fully configured. // "PREFIX_CONFIGURATION_IN_PROGRESS" - The prefix is being // configured. // "PREFIX_REMOVAL_IN_PROGRESS" - The prefix is being removed. // "PTR_CONFIGURED" - User has configured the PTR. + // "READY_TO_ANNOUNCE" - The prefix is currently withdrawn but ready + // to be announced. // "REVERSE_DNS_LOOKUP_FAILED" - Reverse DNS lookup failed. // "VALIDATED" - Reverse DNS lookup is successful. Status string `json:"status,omitempty"` @@ -37217,15 +38174,15 @@ type PublicAdvertisedPrefix struct { // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "ByoipApiVersion") to + // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be // sent to the server regardless of whether the field is empty or not. // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CreationTimestamp") to + // NullFields is a list of field names (e.g. "ByoipApiVersion") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the @@ -37485,6 +38442,17 @@ func (s *PublicAdvertisedPrefixPublicDelegatedPrefix) MarshalJSON() ([]byte, err // may be further broken up into smaller IP blocks in the same scope as // the parent block. type PublicDelegatedPrefix struct { + // ByoipApiVersion: [Output Only] The version of BYOIP API. + // + // Possible values: + // "V1" - This public delegated prefix usually takes 4 weeks to + // delete, and the BGP status cannot be changed. Announce and Withdraw + // APIs can not be used on this prefix. + // "V2" - This public delegated prefix takes minutes to delete. + // Announce and Withdraw APIs can be used on this prefix to change the + // BGP status. + ByoipApiVersion string `json:"byoipApiVersion,omitempty"` + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` @@ -37553,6 +38521,10 @@ type PublicDelegatedPrefix struct { // // Possible values: // "ANNOUNCED" - The public delegated prefix is active. + // "ANNOUNCED_TO_GOOGLE" - The prefix is announced within Google + // network. + // "ANNOUNCED_TO_INTERNET" - The prefix is announced to Internet and + // within Google. // "DELETING" - The public delegated prefix is being deprovsioned. // "INITIALIZING" - The public delegated prefix is being initialized // and addresses cannot be created yet. @@ -37564,15 +38536,15 @@ type PublicDelegatedPrefix struct { // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "ByoipApiVersion") to + // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be // sent to the server regardless of whether the field is empty or not. 
// This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CreationTimestamp") to + // NullFields is a list of field names (e.g. "ByoipApiVersion") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the @@ -40592,6 +41564,62 @@ func (s *RegionListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type RegionNetworkEndpointGroupsAttachEndpointsRequest struct { + // NetworkEndpoints: The list of network endpoints to be attached. + NetworkEndpoints []*NetworkEndpoint `json:"networkEndpoints,omitempty"` + + // ForceSendFields is a list of field names (e.g. "NetworkEndpoints") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NetworkEndpoints") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *RegionNetworkEndpointGroupsAttachEndpointsRequest) MarshalJSON() ([]byte, error) { + type NoMethod RegionNetworkEndpointGroupsAttachEndpointsRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type RegionNetworkEndpointGroupsDetachEndpointsRequest struct { + // NetworkEndpoints: The list of network endpoints to be detached. + NetworkEndpoints []*NetworkEndpoint `json:"networkEndpoints,omitempty"` + + // ForceSendFields is a list of field names (e.g. "NetworkEndpoints") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NetworkEndpoints") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
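A minimal sketch of building one of the new regional NEG requests, assuming the standard compute/v1 import and that NetworkEndpoint exposes IpAddress and Port as in the zonal API (an assumption; that type is not part of this hunk):

package main

import (
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	req := &compute.RegionNetworkEndpointGroupsAttachEndpointsRequest{
		NetworkEndpoints: []*compute.NetworkEndpoint{
			// IpAddress/Port are assumed to match the zonal NetworkEndpoint type.
			{IpAddress: "10.0.0.5", Port: 8080},
		},
	}
	fmt.Printf("attaching %d endpoint(s)\n", len(req.NetworkEndpoints))
}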
+ NullFields []string `json:"-"` +} + +func (s *RegionNetworkEndpointGroupsDetachEndpointsRequest) MarshalJSON() ([]byte, error) { + type NoMethod RegionNetworkEndpointGroupsDetachEndpointsRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type RegionNetworkFirewallPoliciesGetEffectiveFirewallsResponse struct { // FirewallPolicys: Effective firewalls from firewall policy. FirewallPolicys []*RegionNetworkFirewallPoliciesGetEffectiveFirewallsResponseEffectiveFirewallPolicy `json:"firewallPolicys,omitempty"` @@ -42867,7 +43895,10 @@ type Route struct { Description string `json:"description,omitempty"` // DestRange: The destination range of outgoing packets that this route - // applies to. Both IPv4 and IPv6 are supported. + // applies to. Both IPv4 and IPv6 are supported. Must specify an IPv4 + // range (e.g. 192.0.2.0/24) or an IPv6 range in RFC 4291 format (e.g. + // 2001:db8::/32). IPv6 range will be displayed using RFC 5952 + // compressed format. DestRange string `json:"destRange,omitempty"` // Id: [Output Only] The unique identifier for the resource. This @@ -42916,7 +43947,12 @@ type Route struct { NextHopInstance string `json:"nextHopInstance,omitempty"` // NextHopIp: The network IP address of an instance that should handle - // matching packets. Only IPv4 is supported. + // matching packets. Both IPv6 address and IPv4 addresses are supported. + // Must specify an IPv4 address in dot-decimal notation (e.g. + // 192.0.2.99) or an IPv6 address in RFC 4291 format (e.g. + // 2001:db8::2d9:51:0:0 or 2001:db8:0:0:2d9:51:0:0). IPv6 addresses will + // be displayed using RFC 5952 compressed format (e.g. + // 2001:db8::2d9:51:0:0). Should never be an IPv4-mapped IPv6 address. NextHopIp string `json:"nextHopIp,omitempty"` // NextHopNetwork: The URL of the local network if it should handle @@ -43402,9 +44438,12 @@ type Router struct { // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` - // Interfaces: Router interfaces. Each interface requires either one - // linked resource, (for example, linkedVpnTunnel), or IP address and IP - // address range (for example, ipRange), or both. + // Interfaces: Router interfaces. To create a BGP peer that uses a + // router interface, the interface must have one of the following fields + // specified: - linkedVpnTunnel - linkedInterconnectAttachment - + // subnetwork You can create a router interface without any of these + // fields specified. However, you cannot create a BGP peer that uses + // that interface. Interfaces []*RouterInterface `json:"interfaces,omitempty"` // Kind: [Output Only] Type of resource. Always compute#router for @@ -44010,13 +45049,13 @@ type RouterInterface struct { // LinkedInterconnectAttachment: URI of the linked Interconnect // attachment. It must be in the same region as the router. Each // interface can have one linked resource, which can be a VPN tunnel, an - // Interconnect attachment, or a virtual machine instance. + // Interconnect attachment, or a subnetwork. LinkedInterconnectAttachment string `json:"linkedInterconnectAttachment,omitempty"` // LinkedVpnTunnel: URI of the linked VPN tunnel, which must be in the // same region as the router. Each interface can have one linked // resource, which can be a VPN tunnel, an Interconnect attachment, or a - // virtual machine instance. + // subnetwork. 
LinkedVpnTunnel string `json:"linkedVpnTunnel,omitempty"` // ManagementType: [Output Only] The resource that configures and @@ -44298,7 +45337,7 @@ type RouterMd5AuthenticationKey struct { Key string `json:"key,omitempty"` // Name: Name used to identify the key. Must be unique within a router. - // Must be referenced by at least one bgpPeer. Must comply with RFC1035. + // Must be referenced by exactly one bgpPeer. Must comply with RFC1035. Name string `json:"name,omitempty"` // ForceSendFields is a list of field names (e.g. "Key") to @@ -44332,8 +45371,8 @@ func (s *RouterMd5AuthenticationKey) MarshalJSON() ([]byte, error) { // no external IPs are provided. type RouterNat struct { // AutoNetworkTier: The network tier to use when automatically reserving - // IP addresses. Must be one of: PREMIUM, STANDARD. If not specified, - // PREMIUM tier will be used. + // NAT IP addresses. Must be one of: PREMIUM, STANDARD. If not + // specified, then the current project-level default tier is used. // // Possible values: // "FIXED_STANDARD" - Public internet quality with fixed bandwidth. @@ -44367,6 +45406,9 @@ type RouterNat struct { // ENDPOINT_TYPE_VM // // Possible values: + // "ENDPOINT_TYPE_MANAGED_PROXY_LB" - This is used for regional + // Application Load Balancers (internal and external) and regional proxy + // Network Load Balancers (internal and external) endpoints. // "ENDPOINT_TYPE_SWG" - This is used for Secure Web Gateway // endpoints. // "ENDPOINT_TYPE_VM" - This is the default. @@ -44545,8 +45587,8 @@ type RouterNatRule struct { // "destination.ip == '1.1.0.1' || destination.ip == '8.8.8.8'" The // following example is a valid match expression for private NAT: // "nexthop.hub == - // 'https://networkconnectivity.googleapis.com/v1alpha1/projects/my-proje - // ct/global/hub/hub-1'" + // '//networkconnectivity.googleapis.com/projects/my-project/locations/gl + // obal/hubs/hub-1'" Match string `json:"match,omitempty"` // RuleNumber: An integer uniquely identifying a rule in the list. The @@ -46190,6 +47232,15 @@ type SecurityPolicy struct { // "CLOUD_ARMOR_NETWORK" Type string `json:"type,omitempty"` + // UserDefinedFields: Definitions of user-defined fields for + // CLOUD_ARMOR_NETWORK policies. A user-defined field consists of up to + // 4 bytes extracted from a fixed offset in the packet, relative to the + // IPv4, IPv6, TCP, or UDP header, with an optional mask to select + // certain bits. Rules may then specify matching values for these + // fields. Example: userDefinedFields: - name: "ipv4_fragment_offset" + // base: IPV4 offset: 6 size: 2 mask: "0x1fff" + UserDefinedFields []*SecurityPolicyUserDefinedField `json:"userDefinedFields,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -46269,6 +47320,10 @@ type SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig struct { // "STANDARD" RuleVisibility string `json:"ruleVisibility,omitempty"` + // ThresholdConfigs: Configuration options for layer7 adaptive + // protection for various customizable thresholds. + ThresholdConfigs []*SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfig `json:"thresholdConfigs,omitempty"` + // ForceSendFields is a list of field names (e.g. "Enable") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. 
However, any @@ -46292,6 +47347,62 @@ func (s *SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig) MarshalJ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfig struct { + AutoDeployConfidenceThreshold float64 `json:"autoDeployConfidenceThreshold,omitempty"` + + AutoDeployExpirationSec int64 `json:"autoDeployExpirationSec,omitempty"` + + AutoDeployImpactedBaselineThreshold float64 `json:"autoDeployImpactedBaselineThreshold,omitempty"` + + AutoDeployLoadThreshold float64 `json:"autoDeployLoadThreshold,omitempty"` + + // Name: The name must be 1-63 characters long, and comply with RFC1035. + // The name must be unique within the security policy. + Name string `json:"name,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "AutoDeployConfidenceThreshold") to unconditionally include in API + // requests. By default, fields with empty or default values are omitted + // from API requests. However, any non-pointer, non-interface field + // appearing in ForceSendFields will be sent to the server regardless of + // whether the field is empty or not. This may be used to include empty + // fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. + // "AutoDeployConfidenceThreshold") to include in API requests with the + // JSON null value. By default, fields with empty values are omitted + // from API requests. However, any field with an empty value appearing + // in NullFields will be sent to the server as null. It is an error if a + // field in this list has a non-empty value. This may be used to include + // null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfig) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +func (s *SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfig) UnmarshalJSON(data []byte) error { + type NoMethod SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfig + var s1 struct { + AutoDeployConfidenceThreshold gensupport.JSONFloat64 `json:"autoDeployConfidenceThreshold"` + AutoDeployImpactedBaselineThreshold gensupport.JSONFloat64 `json:"autoDeployImpactedBaselineThreshold"` + AutoDeployLoadThreshold gensupport.JSONFloat64 `json:"autoDeployLoadThreshold"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.AutoDeployConfidenceThreshold = float64(s1.AutoDeployConfidenceThreshold) + s.AutoDeployImpactedBaselineThreshold = float64(s1.AutoDeployImpactedBaselineThreshold) + s.AutoDeployLoadThreshold = float64(s1.AutoDeployLoadThreshold) + return nil +} + type SecurityPolicyAdvancedOptionsConfig struct { // JsonCustomConfig: Custom configuration to apply the JSON parsing. // Only applicable when json_parsing is set to STANDARD. @@ -46681,6 +47792,31 @@ type SecurityPolicyRule struct { // If it evaluates to true, the corresponding 'action' is enforced. Match *SecurityPolicyRuleMatcher `json:"match,omitempty"` + // NetworkMatch: A match condition that incoming packets are evaluated + // against for CLOUD_ARMOR_NETWORK security policies. If it matches, the + // corresponding 'action' is enforced. 
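The new threshold config routes its float fields through gensupport.JSONFloat64 in UnmarshalJSON, which is how these generated clients usually tolerate numbers the API may also encode as strings (that string-handling detail is an assumption about gensupport, not shown in this hunk). A small decoding sketch:

package main

import (
	"encoding/json"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	var cfg compute.SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfig
	// Decoding goes through the custom UnmarshalJSON defined above.
	raw := []byte(`{"name":"my-threshold","autoDeployLoadThreshold":0.7}`)
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Name, cfg.AutoDeployLoadThreshold)
}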
The match criteria for a rule + // consists of built-in match fields (like 'srcIpRanges') and + // potentially multiple user-defined match fields ('userDefinedFields'). + // Field values may be extracted directly from the packet or derived + // from it (e.g. 'srcRegionCodes'). Some fields may not be present in + // every packet (e.g. 'srcPorts'). A user-defined field is only present + // if the base header is found in the packet and the entire field is in + // bounds. Each match field may specify which values can match it, + // listing one or more ranges, prefixes, or exact values that are + // considered a match for the field. A field value must be present in + // order to match a specified match field. If no match values are + // specified for a match field, then any field value is considered to + // match it, and it's not required to be present. For strings specifying + // '*' is also equivalent to match all. For a packet to match a rule, + // all specified match fields must match the corresponding field values + // derived from the packet. Example: networkMatch: srcIpRanges: - + // "192.0.2.0/24" - "198.51.100.0/24" userDefinedFields: - name: + // "ipv4_fragment_offset" values: - "1-0x1fff" The above match condition + // matches packets with a source IP in 192.0.2.0/24 or 198.51.100.0/24 + // and a user-defined field named "ipv4_fragment_offset" with a value + // between 1 and 0x1fff inclusive. + NetworkMatch *SecurityPolicyRuleNetworkMatcher `json:"networkMatch,omitempty"` + // PreconfiguredWafConfig: Preconfigured WAF configuration to be applied // for the rule. If the rule does not evaluate preconfigured WAF rules, // i.e., if evaluatePreconfiguredWaf() is not used, this field will have @@ -46872,6 +48008,99 @@ func (s *SecurityPolicyRuleMatcherConfig) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// SecurityPolicyRuleNetworkMatcher: Represents a match condition that +// incoming network traffic is evaluated against. +type SecurityPolicyRuleNetworkMatcher struct { + // DestIpRanges: Destination IPv4/IPv6 addresses or CIDR prefixes, in + // standard text format. + DestIpRanges []string `json:"destIpRanges,omitempty"` + + // DestPorts: Destination port numbers for TCP/UDP/SCTP. Each element + // can be a 16-bit unsigned decimal number (e.g. "80") or range (e.g. + // "0-1023"). + DestPorts []string `json:"destPorts,omitempty"` + + // IpProtocols: IPv4 protocol / IPv6 next header (after extension + // headers). Each element can be an 8-bit unsigned decimal number (e.g. + // "6"), range (e.g. "253-254"), or one of the following protocol names: + // "tcp", "udp", "icmp", "esp", "ah", "ipip", or "sctp". + IpProtocols []string `json:"ipProtocols,omitempty"` + + // SrcAsns: BGP Autonomous System Number associated with the source IP + // address. + SrcAsns []int64 `json:"srcAsns,omitempty"` + + // SrcIpRanges: Source IPv4/IPv6 addresses or CIDR prefixes, in standard + // text format. + SrcIpRanges []string `json:"srcIpRanges,omitempty"` + + // SrcPorts: Source port numbers for TCP/UDP/SCTP. Each element can be a + // 16-bit unsigned decimal number (e.g. "80") or range (e.g. "0-1023"). + SrcPorts []string `json:"srcPorts,omitempty"` + + // SrcRegionCodes: Two-letter ISO 3166-1 alpha-2 country code associated + // with the source IP address. + SrcRegionCodes []string `json:"srcRegionCodes,omitempty"` + + // UserDefinedFields: User-defined fields. Each element names a defined + // field and lists the matching values for that field. 
+ UserDefinedFields []*SecurityPolicyRuleNetworkMatcherUserDefinedFieldMatch `json:"userDefinedFields,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DestIpRanges") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DestIpRanges") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SecurityPolicyRuleNetworkMatcher) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicyRuleNetworkMatcher + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SecurityPolicyRuleNetworkMatcherUserDefinedFieldMatch struct { + // Name: Name of the user-defined field, as given in the definition. + Name string `json:"name,omitempty"` + + // Values: Matching values of the field. Each element can be a 32-bit + // unsigned decimal or hexadecimal (starting with "0x") number (e.g. + // "64") or range (e.g. "0x400-0x7ff"). + Values []string `json:"values,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Name") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Name") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SecurityPolicyRuleNetworkMatcherUserDefinedFieldMatch) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicyRuleNetworkMatcherUserDefinedFieldMatch + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type SecurityPolicyRulePreconfiguredWafConfig struct { // Exclusions: A list of exclusions to apply during preconfigured WAF // evaluation. @@ -47232,9 +48461,71 @@ func (s *SecurityPolicyRuleRedirectOptions) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type SecurityPolicyUserDefinedField struct { + // Base: The base relative to which 'offset' is measured. Possible + // values are: - IPV4: Points to the beginning of the IPv4 header. - + // IPV6: Points to the beginning of the IPv6 header. - TCP: Points to + // the beginning of the TCP header, skipping over any IPv4 options or + // IPv6 extension headers. 
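The networkMatch example embedded in the NetworkMatch documentation above maps directly onto the new matcher types; a minimal sketch (standard compute/v1 import assumed):

package main

import (
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	// Matches packets from 192.0.2.0/24 or 198.51.100.0/24 whose
	// ipv4_fragment_offset user-defined field is between 1 and 0x1fff.
	match := &compute.SecurityPolicyRuleNetworkMatcher{
		SrcIpRanges: []string{"192.0.2.0/24", "198.51.100.0/24"},
		UserDefinedFields: []*compute.SecurityPolicyRuleNetworkMatcherUserDefinedFieldMatch{
			{Name: "ipv4_fragment_offset", Values: []string{"1-0x1fff"}},
		},
	}
	fmt.Println(len(match.SrcIpRanges), "source ranges")
}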
Not present for non-first fragments. - UDP: + // Points to the beginning of the UDP header, skipping over any IPv4 + // options or IPv6 extension headers. Not present for non-first + // fragments. required + // + // Possible values: + // "IPV4" + // "IPV6" + // "TCP" + // "UDP" + Base string `json:"base,omitempty"` + + // Mask: If specified, apply this mask (bitwise AND) to the field to + // ignore bits before matching. Encoded as a hexadecimal number + // (starting with "0x"). The last byte of the field (in network byte + // order) corresponds to the least significant byte of the mask. + Mask string `json:"mask,omitempty"` + + // Name: The name of this field. Must be unique within the policy. + Name string `json:"name,omitempty"` + + // Offset: Offset of the first byte of the field (in network byte order) + // relative to 'base'. + Offset int64 `json:"offset,omitempty"` + + // Size: Size of the field in bytes. Valid values: 1-4. + Size int64 `json:"size,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Base") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Base") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SecurityPolicyUserDefinedField) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicyUserDefinedField + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // SecuritySettings: The authentication and authorization settings for a // BackendService. type SecuritySettings struct { + // AwsV4Authentication: The configuration needed to generate a signature + // for access to private storage buckets that support AWS's Signature + // Version 4 for authentication. Allowed only for INTERNET_IP_PORT and + // INTERNET_FQDN_PORT NEG backends. + AwsV4Authentication *AWSV4Signature `json:"awsV4Authentication,omitempty"` + // ClientTlsPolicy: Optional. A URL referring to a // networksecurity.ClientTlsPolicy resource that describes how clients // should authenticate with this service's backends. clientTlsPolicy @@ -47258,15 +48549,15 @@ type SecuritySettings struct { // attached clientTlsPolicy with clientCertificate (mTLS mode). SubjectAltNames []string `json:"subjectAltNames,omitempty"` - // ForceSendFields is a list of field names (e.g. "ClientTlsPolicy") to - // unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "AwsV4Authentication") + // to unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be // sent to the server regardless of whether the field is empty or not. // This may be used to include empty fields in Patch requests. 
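Likewise, the userDefinedFields example from the SecurityPolicy documentation translates into the new SecurityPolicyUserDefinedField type; a minimal sketch (compute/v1 import assumed):

package main

import (
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	// Two bytes at offset 6 of the IPv4 header, masked down to the fragment-offset bits.
	policy := &compute.SecurityPolicy{
		Type: "CLOUD_ARMOR_NETWORK",
		UserDefinedFields: []*compute.SecurityPolicyUserDefinedField{
			{
				Name:   "ipv4_fragment_offset",
				Base:   "IPV4",
				Offset: 6,
				Size:   2,
				Mask:   "0x1fff",
			},
		},
	}
	fmt.Println(policy.UserDefinedFields[0].Name)
}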
ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "ClientTlsPolicy") to + // NullFields is a list of field names (e.g. "AwsV4Authentication") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the @@ -47498,7 +48789,7 @@ type ServiceAttachment struct { // PENDING and ACCEPTED/REJECTED PSC endpoints. For example, an ACCEPTED // PSC endpoint will be moved to REJECTED if its project is added to the // reject list. For newly created service attachment, this boolean - // defaults to true. + // defaults to false. ReconcileConnections bool `json:"reconcileConnections,omitempty"` // Region: [Output Only] URL of the region where the service attachment @@ -48188,6 +49479,81 @@ func (s *ServiceAttachmentsScopedListWarningData) MarshalJSON() ([]byte, error) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type SetCommonInstanceMetadataOperationMetadata struct { + // ClientOperationId: [Output Only] The client operation id. + ClientOperationId string `json:"clientOperationId,omitempty"` + + // PerLocationOperations: [Output Only] Status information per location + // (location name is key). Example key: zones/us-central1-a + PerLocationOperations map[string]SetCommonInstanceMetadataOperationMetadataPerLocationOperationInfo `json:"perLocationOperations,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ClientOperationId") + // to unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ClientOperationId") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *SetCommonInstanceMetadataOperationMetadata) MarshalJSON() ([]byte, error) { + type NoMethod SetCommonInstanceMetadataOperationMetadata + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SetCommonInstanceMetadataOperationMetadataPerLocationOperationInfo struct { + // Error: [Output Only] If state is `ABANDONED` or `FAILED`, this field + // is populated. + Error *Status `json:"error,omitempty"` + + // State: [Output Only] Status of the action, which can be one of the + // following: `PROPAGATING`, `PROPAGATED`, `ABANDONED`, `FAILED`, or + // `DONE`. + // + // Possible values: + // "ABANDONED" - Operation not tracked in this location e.g. zone is + // marked as DOWN. + // "DONE" - Operation has completed successfully. + // "FAILED" - Operation is in an error state. + // "PROPAGATED" - Operation is confirmed to be in the location. + // "PROPAGATING" - Operation is not yet confirmed to have been created + // in the location. + // "UNSPECIFIED" + State string `json:"state,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
"Error") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Error") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SetCommonInstanceMetadataOperationMetadataPerLocationOperationInfo) MarshalJSON() ([]byte, error) { + type NoMethod SetCommonInstanceMetadataOperationMetadataPerLocationOperationInfo + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // ShareSettings: The share setting for reservations and sole tenancy // node groups. type ShareSettings struct { @@ -48563,6 +49929,10 @@ type Snapshot struct { // customer-supplied encryption key. SourceDiskEncryptionKey *CustomerEncryptionKey `json:"sourceDiskEncryptionKey,omitempty"` + // SourceDiskForRecoveryCheckpoint: The source disk whose recovery + // checkpoint will be used to create this snapshot. + SourceDiskForRecoveryCheckpoint string `json:"sourceDiskForRecoveryCheckpoint,omitempty"` + // SourceDiskId: [Output Only] The ID value of the disk used to create // this snapshot. This value may be used to determine whether the // snapshot was taken from the current or a previous instance of a given @@ -48826,6 +50196,112 @@ func (s *SnapshotListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type SnapshotSettings struct { + // StorageLocation: Policy of which storage location is going to be + // resolved, and additional data that particularizes how the policy is + // going to be carried out. + StorageLocation *SnapshotSettingsStorageLocationSettings `json:"storageLocation,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "StorageLocation") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "StorageLocation") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *SnapshotSettings) MarshalJSON() ([]byte, error) { + type NoMethod SnapshotSettings + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SnapshotSettingsStorageLocationSettings struct { + // Locations: When the policy is SPECIFIC_LOCATIONS, snapshots will be + // stored in the locations listed in this field. Keys are GCS bucket + // locations. + Locations map[string]SnapshotSettingsStorageLocationSettingsStorageLocationPreference `json:"locations,omitempty"` + + // Policy: The chosen location policy. + // + // Possible values: + // "LOCAL_REGION" - Store snapshot in the same region as with the + // originating disk. No additional parameters are needed. + // "NEAREST_MULTI_REGION" - Store snapshot to the nearest multi region + // GCS bucket, relative to the originating disk. No additional + // parameters are needed. + // "SPECIFIC_LOCATIONS" - Store snapshot in the specific locations, as + // specified by the user. The list of regions to store must be defined + // under the `locations` field. + // "STORAGE_LOCATION_POLICY_UNSPECIFIED" + Policy string `json:"policy,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Locations") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Locations") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SnapshotSettingsStorageLocationSettings) MarshalJSON() ([]byte, error) { + type NoMethod SnapshotSettingsStorageLocationSettings + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// SnapshotSettingsStorageLocationSettingsStorageLocationPreference: A +// structure for specifying storage locations. +type SnapshotSettingsStorageLocationSettingsStorageLocationPreference struct { + // Name: Name of the location. It should be one of the GCS buckets. + Name string `json:"name,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Name") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Name") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
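A minimal sketch of the new snapshot settings types, assuming the compute/v1 import; the "us-central1" key is only an illustrative location, not something this patch prescribes:

package main

import (
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	// Store future snapshots only in an explicitly listed location.
	settings := &compute.SnapshotSettings{
		StorageLocation: &compute.SnapshotSettingsStorageLocationSettings{
			Policy: "SPECIFIC_LOCATIONS",
			Locations: map[string]compute.SnapshotSettingsStorageLocationSettingsStorageLocationPreference{
				"us-central1": {Name: "us-central1"},
			},
		},
	}
	fmt.Println(settings.StorageLocation.Policy)
}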
+ NullFields []string `json:"-"` +} + +func (s *SnapshotSettingsStorageLocationSettingsStorageLocationPreference) MarshalJSON() ([]byte, error) { + type NoMethod SnapshotSettingsStorageLocationSettingsStorageLocationPreference + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type SourceDiskEncryptionKey struct { // DiskEncryptionKey: The customer-supplied encryption key of the source // disk. Required if the source disk is protected by a customer-supplied @@ -49003,16 +50479,19 @@ func (s *SourceInstanceProperties) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// SslCertificate: Represents an SSL Certificate resource. Google -// Compute Engine has two SSL Certificate resources: * Global +// SslCertificate: Represents an SSL certificate resource. Google +// Compute Engine has two SSL certificate resources: * Global // (/compute/docs/reference/rest/v1/sslCertificates) * Regional -// (/compute/docs/reference/rest/v1/regionSslCertificates) The -// sslCertificates are used by: - external HTTPS load balancers - SSL -// proxy load balancers The regionSslCertificates are used by internal -// HTTPS load balancers. Optionally, certificate file contents that you -// upload can contain a set of up to five PEM-encoded certificates. The -// API call creates an object (sslCertificate) that holds this data. You -// can use SSL keys and certificates to secure connections to a load +// (/compute/docs/reference/rest/v1/regionSslCertificates) The global +// SSL certificates (sslCertificates) are used by: - Global external +// Application Load Balancers - Classic Application Load Balancers - +// Proxy Network Load Balancers (with target SSL proxies) The regional +// SSL certificates (regionSslCertificates) are used by: - Regional +// external Application Load Balancers - Regional internal Application +// Load Balancers Optionally, certificate file contents that you upload +// can contain a set of up to five PEM-encoded certificates. The API +// call creates an object (sslCertificate) that holds this data. You can +// use SSL keys and certificates to secure connections to a load // balancer. For more information, read Creating and using SSL // certificates, SSL certificates quotas and limits, and Troubleshooting // SSL certificates. @@ -50670,6 +52149,16 @@ type StatefulPolicyPreservedState struct { // of the disks. Disks map[string]StatefulPolicyPreservedStateDiskDevice `json:"disks,omitempty"` + // ExternalIPs: External network IPs assigned to the instances that will + // be preserved on instance delete, update, etc. This map is keyed with + // the network interface name. + ExternalIPs map[string]StatefulPolicyPreservedStateNetworkIp `json:"externalIPs,omitempty"` + + // InternalIPs: Internal network IPs assigned to the instances that will + // be preserved on instance delete, update, etc. This map is keyed with + // the network interface name. + InternalIPs map[string]StatefulPolicyPreservedStateNetworkIp `json:"internalIPs,omitempty"` + // ForceSendFields is a list of field names (e.g. "Disks") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. 
However, any @@ -50729,6 +52218,85 @@ func (s *StatefulPolicyPreservedStateDiskDevice) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type StatefulPolicyPreservedStateNetworkIp struct { + // AutoDelete: These stateful IPs will never be released during + // autohealing, update or VM instance recreate operations. This flag is + // used to configure if the IP reservation should be deleted after it is + // no longer used by the group, e.g. when the given instance or the + // whole group is deleted. + // + // Possible values: + // "NEVER" + // "ON_PERMANENT_INSTANCE_DELETION" + AutoDelete string `json:"autoDelete,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AutoDelete") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AutoDelete") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *StatefulPolicyPreservedStateNetworkIp) MarshalJSON() ([]byte, error) { + type NoMethod StatefulPolicyPreservedStateNetworkIp + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Status: The `Status` type defines a logical error model that is +// suitable for different programming environments, including REST APIs +// and RPC APIs. It is used by gRPC (https://github.com/grpc). Each +// `Status` message contains three pieces of data: error code, error +// message, and error details. You can find out more about this error +// model and how to work with it in the API Design Guide +// (https://cloud.google.com/apis/design/errors). +type Status struct { + // Code: The status code, which should be an enum value of + // google.rpc.Code. + Code int64 `json:"code,omitempty"` + + // Details: A list of messages that carry the error details. There is a + // common set of message types for APIs to use. + Details []googleapi.RawMessage `json:"details,omitempty"` + + // Message: A developer-facing error message, which should be in + // English. Any user-facing error message should be localized and sent + // in the google.rpc.Status.details field, or localized by the client. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
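A minimal sketch of how the per-location metadata results and the google.rpc-style Status added above might be inspected together (compute/v1 import assumed):

package main

import (
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// reportFailures prints the error attached to every location whose
// SetCommonInstanceMetadata propagation ended up in the FAILED state.
func reportFailures(md *compute.SetCommonInstanceMetadataOperationMetadata) {
	for location, op := range md.PerLocationOperations {
		if op.State == "FAILED" && op.Error != nil {
			fmt.Printf("%s: code=%d message=%s\n", location, op.Error.Code, op.Error.Message)
		}
	}
}

func main() {
	reportFailures(&compute.SetCommonInstanceMetadataOperationMetadata{})
}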
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Status) MarshalJSON() ([]byte, error) { + type NoMethod Status + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Subnetwork: Represents a Subnetwork resource. A subnetwork (also // known as a subnet) is a logical partition of a Virtual Private Cloud // network with one primary IP range and zero or more secondary IP @@ -50858,6 +52426,8 @@ type Subnetwork struct { // purpose field is set to REGIONAL_MANAGED_PROXY. // // Possible values: + // "GLOBAL_MANAGED_PROXY" - Subnet reserved for Global Envoy-based + // Load Balancing. // "INTERNAL_HTTPS_LOAD_BALANCER" - Subnet reserved for Internal // HTTP(S) Load Balancing. // "PRIVATE" - Regular user created or automatically created subnet. @@ -50865,8 +52435,8 @@ type Subnetwork struct { // subnet. // "PRIVATE_SERVICE_CONNECT" - Subnetworks created for Private Service // Connect in the producer network. - // "REGIONAL_MANAGED_PROXY" - Subnetwork used for Regional - // Internal/External HTTP(S) Load Balancing. + // "REGIONAL_MANAGED_PROXY" - Subnetwork used for Regional Envoy-based + // Load Balancing. Purpose string `json:"purpose,omitempty"` // Region: URL of the region where the Subnetwork resides. This field @@ -52325,12 +53895,15 @@ func (s *TargetHttpProxiesScopedListWarningData) MarshalJSON() ([]byte, error) { // Compute Engine has two Target HTTP Proxy resources: * Global // (/compute/docs/reference/rest/v1/targetHttpProxies) * Regional // (/compute/docs/reference/rest/v1/regionTargetHttpProxies) A target -// HTTP proxy is a component of GCP HTTP load balancers. * -// targetHttpProxies are used by external HTTP load balancers and -// Traffic Director. * regionTargetHttpProxies are used by internal HTTP -// load balancers. Forwarding rules reference a target HTTP proxy, and -// the target proxy then references a URL map. For more information, -// read Using Target Proxies and Forwarding rule concepts. +// HTTP proxy is a component of Google Cloud HTTP load balancers. * +// targetHttpProxies are used by global external Application Load +// Balancers, classic Application Load Balancers, cross-region internal +// Application Load Balancers, and Traffic Director. * +// regionTargetHttpProxies are used by regional internal Application +// Load Balancers and regional external Application Load Balancers. +// Forwarding rules reference a target HTTP proxy, and the target proxy +// then references a URL map. For more information, read Using Target +// Proxies and Forwarding rule concepts. type TargetHttpProxy struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -52352,10 +53925,10 @@ type TargetHttpProxy struct { // HttpKeepAliveTimeoutSec: Specifies how long to keep a connection // open, after completing a response, while there is no matching traffic // (in seconds). If an HTTP keep-alive is not specified, a default value - // (610 seconds) will be used. For Global external HTTP(S) load - // balancer, the minimum allowed value is 5 seconds and the maximum - // allowed value is 1200 seconds. For Global external HTTP(S) load - // balancer (classic), this option is not available publicly. + // (610 seconds) will be used. 
For global external Application Load + // Balancers, the minimum allowed value is 5 seconds and the maximum + // allowed value is 1200 seconds. For classic Application Load + // Balancers, this option is not supported. HttpKeepAliveTimeoutSec int64 `json:"httpKeepAliveTimeoutSec,omitempty"` // Id: [Output Only] The unique identifier for the resource. This @@ -52947,8 +54520,11 @@ func (s *TargetHttpsProxiesSetSslCertificatesRequest) MarshalJSON() ([]byte, err // (/compute/docs/reference/rest/v1/targetHttpsProxies) * Regional // (/compute/docs/reference/rest/v1/regionTargetHttpsProxies) A target // HTTPS proxy is a component of GCP HTTPS load balancers. * -// targetHttpsProxies are used by external HTTPS load balancers. * -// regionTargetHttpsProxies are used by internal HTTPS load balancers. +// targetHttpProxies are used by global external Application Load +// Balancers, classic Application Load Balancers, cross-region internal +// Application Load Balancers, and Traffic Director. * +// regionTargetHttpProxies are used by regional internal Application +// Load Balancers and regional external Application Load Balancers. // Forwarding rules reference a target HTTPS proxy, and the target proxy // then references a URL map. For more information, read Using Target // Proxies and Forwarding rule concepts. @@ -52991,10 +54567,10 @@ type TargetHttpsProxy struct { // HttpKeepAliveTimeoutSec: Specifies how long to keep a connection // open, after completing a response, while there is no matching traffic // (in seconds). If an HTTP keep-alive is not specified, a default value - // (610 seconds) will be used. For Global external HTTP(S) load - // balancer, the minimum allowed value is 5 seconds and the maximum - // allowed value is 1200 seconds. For Global external HTTP(S) load - // balancer (classic), this option is not available publicly. + // (610 seconds) will be used. For global external Application Load + // Balancers, the minimum allowed value is 5 seconds and the maximum + // allowed value is 1200 seconds. For classic Application Load + // Balancers, this option is not supported. HttpKeepAliveTimeoutSec int64 `json:"httpKeepAliveTimeoutSec,omitempty"` // Id: [Output Only] The unique identifier for the resource. This @@ -53552,6 +55128,10 @@ type TargetInstance struct { // network that the default network interface belongs to. Network string `json:"network,omitempty"` + // SecurityPolicy: [Output Only] The resource URL for the security + // policy associated with this target instance. + SecurityPolicy string `json:"securityPolicy,omitempty"` + // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` @@ -54220,6 +55800,10 @@ type TargetPool struct { // resides. Region string `json:"region,omitempty"` + // SecurityPolicy: [Output Only] The resource URL for the security + // policy associated with this target pool. + SecurityPolicy string `json:"securityPolicy,omitempty"` + // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` @@ -56968,19 +58552,21 @@ func (s *Uint128) MarshalJSON() ([]byte, error) { // resources: * Global (/compute/docs/reference/rest/v1/urlMaps) * // Regional (/compute/docs/reference/rest/v1/regionUrlMaps) A URL map // resource is a component of certain types of cloud load balancers and -// Traffic Director: * urlMaps are used by external HTTP(S) load -// balancers and Traffic Director. * regionUrlMaps are used by internal -// HTTP(S) load balancers. 
For a list of supported URL map features by -// the load balancer type, see the Load balancing features: Routing and -// traffic management table. For a list of supported URL map features -// for Traffic Director, see the Traffic Director features: Routing and -// traffic management table. This resource defines mappings from -// hostnames and URL paths to either a backend service or a backend -// bucket. To use the global urlMaps resource, the backend service must -// have a loadBalancingScheme of either EXTERNAL or -// INTERNAL_SELF_MANAGED. To use the regionUrlMaps resource, the backend -// service must have a loadBalancingScheme of INTERNAL_MANAGED. For more -// information, read URL Map Concepts. +// Traffic Director: * urlMaps are used by global external Application +// Load Balancers, classic Application Load Balancers, and cross-region +// internal Application Load Balancers. * regionUrlMaps are used by +// internal Application Load Balancers, regional external Application +// Load Balancers and regional internal Application Load Balancers. For +// a list of supported URL map features by the load balancer type, see +// the Load balancing features: Routing and traffic management table. +// For a list of supported URL map features for Traffic Director, see +// the Traffic Director features: Routing and traffic management table. +// This resource defines mappings from hostnames and URL paths to either +// a backend service or a backend bucket. To use the global urlMaps +// resource, the backend service must have a loadBalancingScheme of +// either EXTERNAL or INTERNAL_SELF_MANAGED. To use the regionUrlMaps +// resource, the backend service must have a loadBalancingScheme of +// INTERNAL_MANAGED. For more information, read URL Map Concepts. type UrlMap struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -56993,8 +58579,8 @@ type UrlMap struct { // any weightedBackendServices, defaultService must not be set. // Conversely if defaultService is set, defaultRouteAction cannot // contain any weightedBackendServices. Only one of defaultRouteAction - // or defaultUrlRedirect must be set. URL maps for Classic external - // HTTP(S) load balancers only support the urlRewrite action within + // or defaultUrlRedirect must be set. URL maps for classic Application + // Load Balancers only support the urlRewrite action within // defaultRouteAction. defaultRouteAction has no effect when the URL map // is bound to a target gRPC proxy that has the validateForProxyless // field set to true. @@ -57827,27 +59413,27 @@ func (s *UrlMapsScopedListWarningData) MarshalJSON() ([]byte, error) { type UrlMapsValidateRequest struct { // LoadBalancingSchemes: Specifies the load balancer type(s) this - // validation request is for. Use EXTERNAL_MANAGED for HTTP/HTTPS - // External Global Load Balancer with Advanced Traffic Management. Use - // EXTERNAL for Classic HTTP/HTTPS External Global Load Balancer. Other - // load balancer types are not supported. For more information, refer to - // Choosing a load balancer. If unspecified, the load balancing scheme - // will be inferred from the backend service resources this URL map - // references. If that can not be inferred (for example, this URL map - // only references backend buckets, or this Url map is for rewrites and - // redirects only and doesn't reference any backends), EXTERNAL will be - // used as the default type. 
If specified, the scheme(s) must not - // conflict with the load balancing scheme of the backend service - // resources this Url map references. + // validation request is for. Use EXTERNAL_MANAGED for global external + // Application Load Balancers and regional external Application Load + // Balancers. Use EXTERNAL for classic Application Load Balancers. Use + // INTERNAL_MANAGED for internal Application Load Balancers. For more + // information, refer to Choosing a load balancer. If unspecified, the + // load balancing scheme will be inferred from the backend service + // resources this URL map references. If that can not be inferred (for + // example, this URL map only references backend buckets, or this Url + // map is for rewrites and redirects only and doesn't reference any + // backends), EXTERNAL will be used as the default type. If specified, + // the scheme(s) must not conflict with the load balancing scheme of the + // backend service resources this Url map references. // // Possible values: - // "EXTERNAL" - Signifies that this will be used for Classic L7 - // External Load Balancing. + // "EXTERNAL" - Signifies that this will be used for classic + // Application Load Balancers. // "EXTERNAL_MANAGED" - Signifies that this will be used for - // Envoy-based L7 External Load Balancing. + // Envoy-based global external Application Load Balancers. // "LOAD_BALANCING_SCHEME_UNSPECIFIED" - If unspecified, the // validation will try to infer the scheme from the backend service - // resources this Url map references. If the inferrence is not possible, + // resources this Url map references. If the inference is not possible, // EXTERNAL will be used as the default type. LoadBalancingSchemes []string `json:"loadBalancingSchemes,omitempty"` @@ -58008,6 +59594,8 @@ type UsableSubnetwork struct { // purpose field is set to REGIONAL_MANAGED_PROXY. // // Possible values: + // "GLOBAL_MANAGED_PROXY" - Subnet reserved for Global Envoy-based + // Load Balancing. // "INTERNAL_HTTPS_LOAD_BALANCER" - Subnet reserved for Internal // HTTP(S) Load Balancing. // "PRIVATE" - Regular user created or automatically created subnet. @@ -58015,8 +59603,8 @@ type UsableSubnetwork struct { // subnet. // "PRIVATE_SERVICE_CONNECT" - Subnetworks created for Private Service // Connect in the producer network. - // "REGIONAL_MANAGED_PROXY" - Subnetwork used for Regional - // Internal/External HTTP(S) Load Balancing. + // "REGIONAL_MANAGED_PROXY" - Subnetwork used for Regional Envoy-based + // Load Balancing. Purpose string `json:"purpose,omitempty"` // Role: The role of subnetwork. Currently, this field is only used when @@ -59660,13 +61248,13 @@ type VpnTunnel struct { // redundancy type. PeerExternalGatewayInterface int64 `json:"peerExternalGatewayInterface,omitempty"` - // PeerGcpGateway: URL of the peer side HA GCP VPN gateway to which this - // VPN tunnel is connected. Provided by the client when the VPN tunnel - // is created. This field can be used when creating highly available VPN + // PeerGcpGateway: URL of the peer side HA VPN gateway to which this VPN + // tunnel is connected. Provided by the client when the VPN tunnel is + // created. This field can be used when creating highly available VPN // from VPC network to VPC network, the field is exclusive with the // field peerExternalGateway. If provided, the VPN tunnel will - // automatically use the same vpnGatewayInterface ID in the peer GCP VPN - // gateway. + // automatically use the same vpnGatewayInterface ID in the peer Google + // Cloud VPN gateway. 
PeerGcpGateway string `json:"peerGcpGateway,omitempty"` // PeerIp: IP address of the peer VPN gateway. Only IPv4 is supported. @@ -61065,22 +62653,21 @@ func (r *AcceleratorTypesService) AggregatedList(project string) *AcceleratorTyp // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -61095,7 +62682,8 @@ func (r *AcceleratorTypesService) AggregatedList(project string) *AcceleratorTyp // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. 
func (c *AcceleratorTypesAggregatedListCall) Filter(filter string) *AcceleratorTypesAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -61156,6 +62744,13 @@ func (c *AcceleratorTypesAggregatedListCall) ReturnPartialSuccess(returnPartialS return c } +// ServiceProjectNumber sets the optional parameter +// "serviceProjectNumber": +func (c *AcceleratorTypesAggregatedListCall) ServiceProjectNumber(serviceProjectNumber int64) *AcceleratorTypesAggregatedListCall { + c.urlParams_.Set("serviceProjectNumber", fmt.Sprint(serviceProjectNumber)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -61264,7 +62859,7 @@ func (c *AcceleratorTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (* // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. 
If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -61302,6 +62897,11 @@ func (c *AcceleratorTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (* // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" + // }, + // "serviceProjectNumber": { + // "format": "int64", + // "location": "query", + // "type": "string" // } // }, // "path": "projects/{project}/aggregated/acceleratorTypes", @@ -61538,22 +63138,21 @@ func (r *AcceleratorTypesService) List(project string, zone string) *Accelerator // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. 
The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -61568,7 +63167,8 @@ func (r *AcceleratorTypesService) List(project string, zone string) *Accelerator // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *AcceleratorTypesListCall) Filter(filter string) *AcceleratorTypesListCall { c.urlParams_.Set("filter", filter) return c @@ -61726,7 +63326,7 @@ func (c *AcceleratorTypesListCall) Do(opts ...googleapi.CallOption) (*Accelerato // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -61826,22 +63426,21 @@ func (r *AddressesService) AggregatedList(project string) *AddressesAggregatedLi // filters resources listed in the response. 
Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -61856,7 +63455,8 @@ func (r *AddressesService) AggregatedList(project string) *AddressesAggregatedLi // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *AddressesAggregatedListCall) Filter(filter string) *AddressesAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -61917,6 +63517,13 @@ func (c *AddressesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess return c } +// ServiceProjectNumber sets the optional parameter +// "serviceProjectNumber": +func (c *AddressesAggregatedListCall) ServiceProjectNumber(serviceProjectNumber int64) *AddressesAggregatedListCall { + c.urlParams_.Set("serviceProjectNumber", fmt.Sprint(serviceProjectNumber)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
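For reviewers unfamiliar with the generated surface, here is a minimal usage sketch (not part of the generated client) showing how the revised `Filter` documentation and the new `serviceProjectNumber` query parameter are exercised from caller code. The project ID and filter value are placeholders, not part of this change.

```go
package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	// NewService picks up Application Default Credentials.
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// AIP-160 style filter: field name, operator, value. Per the updated
	// docs, AIP-160 filters and eq/ne regular-expression filters cannot be
	// mixed in a single request.
	call := svc.Addresses.AggregatedList("my-project").
		Filter(`status = "RESERVED"`).
		ReturnPartialSuccess(true)
	// The ServiceProjectNumber setter added in this change can be chained the
	// same way, e.g. .ServiceProjectNumber(n), when listing resources for a
	// shared-VPC service project (n is a placeholder project number).

	if err := call.Pages(ctx, func(page *compute.AddressAggregatedList) error {
		for scope, scoped := range page.Items {
			for _, addr := range scoped.Addresses {
				fmt.Println(scope, addr.Name, addr.Address)
			}
		}
		return nil
	}); err != nil {
		log.Fatal(err)
	}
}
```

The same `Filter`, `ReturnPartialSuccess`, and `ServiceProjectNumber` options apply to the other aggregated-list calls touched by this hunk and the ones that follow.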
@@ -62025,7 +63632,7 @@ func (c *AddressesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Address // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -62063,6 +63670,11 @@ func (c *AddressesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Address // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" + // }, + // "serviceProjectNumber": { + // "format": "int64", + // "location": "query", + // "type": "string" // } // }, // "path": "projects/{project}/aggregated/addresses", @@ -62654,22 +64266,21 @@ func (r *AddressesService) List(project string, region string) *AddressesListCal // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. 
The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -62684,7 +64295,8 @@ func (r *AddressesService) List(project string, region string) *AddressesListCal // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *AddressesListCall) Filter(filter string) *AddressesListCall { c.urlParams_.Set("filter", filter) return c @@ -62842,7 +64454,7 @@ func (c *AddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, erro // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -63319,22 +64931,21 @@ func (r *AutoscalersService) AggregatedList(project string) *AutoscalersAggregat // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. 
The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -63349,7 +64960,8 @@ func (r *AutoscalersService) AggregatedList(project string) *AutoscalersAggregat // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *AutoscalersAggregatedListCall) Filter(filter string) *AutoscalersAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -63410,6 +65022,13 @@ func (c *AutoscalersAggregatedListCall) ReturnPartialSuccess(returnPartialSucces return c } +// ServiceProjectNumber sets the optional parameter +// "serviceProjectNumber": +func (c *AutoscalersAggregatedListCall) ServiceProjectNumber(serviceProjectNumber int64) *AutoscalersAggregatedListCall { + c.urlParams_.Set("serviceProjectNumber", fmt.Sprint(serviceProjectNumber)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -63518,7 +65137,7 @@ func (c *AutoscalersAggregatedListCall) Do(opts ...googleapi.CallOption) (*Autos // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -63556,6 +65175,11 @@ func (c *AutoscalersAggregatedListCall) Do(opts ...googleapi.CallOption) (*Autos // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" + // }, + // "serviceProjectNumber": { + // "format": "int64", + // "location": "query", + // "type": "string" // } // }, // "path": "projects/{project}/aggregated/autoscalers", @@ -64147,22 +65771,21 @@ func (r *AutoscalersService) List(project string, zone string) *AutoscalersListC // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. 
To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -64177,7 +65800,8 @@ func (r *AutoscalersService) List(project string, zone string) *AutoscalersListC // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *AutoscalersListCall) Filter(filter string) *AutoscalersListCall { c.urlParams_.Set("filter", filter) return c @@ -64335,7 +65959,7 @@ func (c *AutoscalersListCall) Do(opts ...googleapi.CallOption) (*AutoscalerList, // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. 
Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -65471,6 +67095,180 @@ func (c *BackendBucketsGetCall) Do(opts ...googleapi.CallOption) (*BackendBucket } +// method id "compute.backendBuckets.getIamPolicy": + +type BackendBucketsGetIamPolicyCall struct { + s *Service + project string + resource string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetIamPolicy: Gets the access control policy for a resource. May be +// empty if no such policy or resource exists. +// +// - project: Project ID for this request. +// - resource: Name or id of the resource for this request. +func (r *BackendBucketsService) GetIamPolicy(project string, resource string) *BackendBucketsGetIamPolicyCall { + c := &BackendBucketsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.resource = resource + return c +} + +// OptionsRequestedPolicyVersion sets the optional parameter +// "optionsRequestedPolicyVersion": Requested IAM Policy version. 
+func (c *BackendBucketsGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *BackendBucketsGetIamPolicyCall { + c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendBucketsGetIamPolicyCall) Fields(s ...googleapi.Field) *BackendBucketsGetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *BackendBucketsGetIamPolicyCall) IfNoneMatch(entityTag string) *BackendBucketsGetIamPolicyCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendBucketsGetIamPolicyCall) Context(ctx context.Context) *BackendBucketsGetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendBucketsGetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendBucketsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/backendBuckets/{resource}/getIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendBuckets.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *BackendBucketsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the access control policy for a resource. May be empty if no such policy or resource exists.", + // "flatPath": "projects/{project}/global/backendBuckets/{resource}/getIamPolicy", + // "httpMethod": "GET", + // "id": "compute.backendBuckets.getIamPolicy", + // "parameterOrder": [ + // "project", + // "resource" + // ], + // "parameters": { + // "optionsRequestedPolicyVersion": { + // "description": "Requested IAM Policy version.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/backendBuckets/{resource}/getIamPolicy", + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + // method id "compute.backendBuckets.insert": type BackendBucketsInsertCall struct { @@ -65661,22 +67459,21 @@ func (r *BackendBucketsService) List(project string) *BackendBucketsListCall { // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. 
-// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -65691,7 +67488,8 @@ func (r *BackendBucketsService) List(project string) *BackendBucketsListCall { // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *BackendBucketsListCall) Filter(filter string) *BackendBucketsListCall { c.urlParams_.Set("filter", filter) return c @@ -65847,7 +67645,7 @@ func (c *BackendBucketsListCall) Do(opts ...googleapi.CallOption) (*BackendBucke // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. 
You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -66271,6 +68069,319 @@ func (c *BackendBucketsSetEdgeSecurityPolicyCall) Do(opts ...googleapi.CallOptio } +// method id "compute.backendBuckets.setIamPolicy": + +type BackendBucketsSetIamPolicyCall struct { + s *Service + project string + resource string + globalsetpolicyrequest *GlobalSetPolicyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetIamPolicy: Sets the access control policy on the specified +// resource. Replaces any existing policy. +// +// - project: Project ID for this request. +// - resource: Name or id of the resource for this request. +func (r *BackendBucketsService) SetIamPolicy(project string, resource string, globalsetpolicyrequest *GlobalSetPolicyRequest) *BackendBucketsSetIamPolicyCall { + c := &BackendBucketsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.resource = resource + c.globalsetpolicyrequest = globalsetpolicyrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendBucketsSetIamPolicyCall) Fields(s ...googleapi.Field) *BackendBucketsSetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendBucketsSetIamPolicyCall) Context(ctx context.Context) *BackendBucketsSetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendBucketsSetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendBucketsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetpolicyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/backendBuckets/{resource}/setIamPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendBuckets.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. 
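// Illustrative sketch (not taken from this patch): the new getIamPolicy/setIamPolicy
// calls above are typically used as a read-modify-write pair. All names below
// ("my-project", "my-backend-bucket", the role and member strings) are placeholders,
// and error handling is reduced to log.Fatal for brevity.
package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // authenticates via Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}

	// Read the current policy, requesting the version-3 policy format.
	policy, err := svc.BackendBuckets.GetIamPolicy("my-project", "my-backend-bucket").
		OptionsRequestedPolicyVersion(3).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}

	// Add a binding and write the policy back; the Etag carried inside the returned
	// Policy lets the server reject concurrent modifications.
	policy.Bindings = append(policy.Bindings, &compute.Binding{
		Role:    "roles/compute.viewer", // placeholder role
		Members: []string{"user:alice@example.com"},
	})
	if _, err := svc.BackendBuckets.SetIamPolicy("my-project", "my-backend-bucket",
		&compute.GlobalSetPolicyRequest{Policy: policy}).Context(ctx).Do(); err != nil {
		log.Fatal(err)
	}
}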
+func (c *BackendBucketsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", + // "flatPath": "projects/{project}/global/backendBuckets/{resource}/setIamPolicy", + // "httpMethod": "POST", + // "id": "compute.backendBuckets.setIamPolicy", + // "parameterOrder": [ + // "project", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/backendBuckets/{resource}/setIamPolicy", + // "request": { + // "$ref": "GlobalSetPolicyRequest" + // }, + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.backendBuckets.testIamPermissions": + +type BackendBucketsTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +// +// - project: Project ID for this request. +// - resource: Name or id of the resource for this request. +func (r *BackendBucketsService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *BackendBucketsTestIamPermissionsCall { + c := &BackendBucketsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendBucketsTestIamPermissionsCall) Fields(s ...googleapi.Field) *BackendBucketsTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
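// Illustrative sketch (not taken from this patch): testIamPermissions answers "which of
// these permissions do I hold on this backend bucket?". The project, resource, and
// permission strings below are placeholders.
package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	resp, err := svc.BackendBuckets.TestIamPermissions("my-project", "my-backend-bucket",
		&compute.TestPermissionsRequest{Permissions: []string{
			"compute.backendBuckets.get",
			"compute.backendBuckets.update",
		}}).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	// The response echoes back only the subset of permissions the caller actually has.
	fmt.Println("granted:", resp.Permissions)
}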
+func (c *BackendBucketsTestIamPermissionsCall) Context(ctx context.Context) *BackendBucketsTestIamPermissionsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendBucketsTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendBucketsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/backendBuckets/{resource}/testIamPermissions") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendBuckets.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BackendBucketsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &TestPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns permissions that a caller has on the specified resource.", + // "flatPath": "projects/{project}/global/backendBuckets/{resource}/testIamPermissions", + // "httpMethod": "POST", + // "id": "compute.backendBuckets.testIamPermissions", + // "parameterOrder": [ + // "project", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/backendBuckets/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, + // "response": { + // "$ref": "TestPermissionsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + // method id "compute.backendBuckets.update": type BackendBucketsUpdateCall struct { @@ -66650,22 +68761,21 @@ func (r *BackendServicesService) AggregatedList(project string) *BackendServices // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. 
-// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -66680,7 +68790,8 @@ func (r *BackendServicesService) AggregatedList(project string) *BackendServices // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *BackendServicesAggregatedListCall) Filter(filter string) *BackendServicesAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -66741,6 +68852,13 @@ func (c *BackendServicesAggregatedListCall) ReturnPartialSuccess(returnPartialSu return c } +// ServiceProjectNumber sets the optional parameter +// "serviceProjectNumber": +func (c *BackendServicesAggregatedListCall) ServiceProjectNumber(serviceProjectNumber int64) *BackendServicesAggregatedListCall { + c.urlParams_.Set("serviceProjectNumber", fmt.Sprint(serviceProjectNumber)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -66849,7 +68967,7 @@ func (c *BackendServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*B // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. 
You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -66887,6 +69005,11 @@ func (c *BackendServicesAggregatedListCall) Do(opts ...googleapi.CallOption) (*B // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" + // }, + // "serviceProjectNumber": { + // "format": "int64", + // "location": "query", + // "type": "string" // } // }, // "path": "projects/{project}/aggregated/backendServices", @@ -67948,22 +70071,21 @@ func (r *BackendServicesService) List(project string) *BackendServicesListCall { // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -67978,7 +70100,8 @@ func (r *BackendServicesService) List(project string) *BackendServicesListCall { // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. 
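// Illustrative sketch (not taken from this patch): the Filter documentation above
// describes two dialects, AIP-160 expressions and `eq`/`ne` regular expressions, which
// cannot be mixed in one request. A minimal AIP-160 example, assuming an authenticated
// client; "my-project" and the filter fields/values are placeholders.
package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// AIP-160 style: field, operator, value. Parenthesized expressions are ANDed by default.
	call := svc.BackendServices.List("my-project").
		Filter(`(loadBalancingScheme = "EXTERNAL") (protocol = "HTTPS")`).
		MaxResults(100)
	if err := call.Pages(ctx, func(page *compute.BackendServiceList) error {
		for _, bs := range page.Items {
			fmt.Println(bs.Name)
		}
		return nil
	}); err != nil {
		log.Fatal(err)
	}
}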
func (c *BackendServicesListCall) Filter(filter string) *BackendServicesListCall { c.urlParams_.Set("filter", filter) return c @@ -68134,7 +70257,7 @@ func (c *BackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServ // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -68203,111 +70326,178 @@ func (c *BackendServicesListCall) Pages(ctx context.Context, f func(*BackendServ } } -// method id "compute.backendServices.patch": +// method id "compute.backendServices.listUsable": -type BackendServicesPatchCall struct { - s *Service - project string - backendService string - backendservice *BackendService - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type BackendServicesListUsableCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Patch: Patches the specified BackendService resource with the data -// included in the request. For more information, see Backend services -// overview. This method supports PATCH semantics and uses the JSON -// merge patch format and processing rules. +// ListUsable: Retrieves an aggregated list of all usable backend +// services in the specified project. // -// - backendService: Name of the BackendService resource to patch. // - project: Project ID for this request. -func (r *BackendServicesService) Patch(project string, backendService string, backendservice *BackendService) *BackendServicesPatchCall { - c := &BackendServicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *BackendServicesService) ListUsable(project string) *BackendServicesListUsableCall { + c := &BackendServicesListUsableCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.backendService = backendService - c.backendservice = backendservice return c } -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. 
This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *BackendServicesPatchCall) RequestId(requestId string) *BackendServicesPatchCall { - c.urlParams_.Set("requestId", requestId) +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. +func (c *BackendServicesListUsableCall) Filter(filter string) *BackendServicesListUsableCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. 
(Default: `500`) +func (c *BackendServicesListUsableCall) MaxResults(maxResults int64) *BackendServicesListUsableCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *BackendServicesListUsableCall) OrderBy(orderBy string) *BackendServicesListUsableCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *BackendServicesListUsableCall) PageToken(pageToken string) *BackendServicesListUsableCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *BackendServicesListUsableCall) ReturnPartialSuccess(returnPartialSuccess bool) *BackendServicesListUsableCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesPatchCall) Fields(s ...googleapi.Field) *BackendServicesPatchCall { +func (c *BackendServicesListUsableCall) Fields(s ...googleapi.Field) *BackendServicesListUsableCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *BackendServicesListUsableCall) IfNoneMatch(entityTag string) *BackendServicesListUsableCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesPatchCall) Context(ctx context.Context) *BackendServicesPatchCall { +func (c *BackendServicesListUsableCall) Context(ctx context.Context) *BackendServicesListUsableCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
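// Illustrative sketch (not taken from this patch): IfNoneMatch turns the listUsable GET
// into a conditional request. If nothing has changed, the server replies 304 and Do
// returns an error that googleapi.IsNotModified recognizes. "my-project" and cachedETag
// (an ETag saved from an earlier response) are hypothetical placeholders.
package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	cachedETag := `"abc123"` // placeholder ETag captured from a previous response
	list, err := svc.BackendServices.ListUsable("my-project").
		IfNoneMatch(cachedETag).Context(ctx).Do()
	if googleapi.IsNotModified(err) {
		fmt.Println("not modified; reuse the cached result")
		return
	}
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("fetched fresh list, HTTP status:", list.HTTPStatusCode)
}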
-func (c *BackendServicesPatchCall) Header() http.Header { +func (c *BackendServicesListUsableCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendServicesListUsableCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/backendServices/{backendService}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/backendServices/listUsable") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "backendService": c.backendService, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.patch" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *BackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +// Do executes the "compute.backendServices.listUsable" call. +// Exactly one of *BackendServiceListUsable or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *BackendServiceListUsable.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BackendServicesListUsableCall) Do(opts ...googleapi.CallOption) (*BackendServiceListUsable, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -68326,7 +70516,7 @@ func (c *BackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &Operation{ + ret := &BackendServiceListUsable{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -68338,20 +70528,35 @@ func (c *BackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Patches the specified BackendService resource with the data included in the request. For more information, see Backend services overview. 
This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "flatPath": "projects/{project}/global/backendServices/{backendService}", - // "httpMethod": "PATCH", - // "id": "compute.backendServices.patch", + // "description": "Retrieves an aggregated list of all usable backend services in the specified project.", + // "flatPath": "projects/{project}/global/backendServices/listUsable", + // "httpMethod": "GET", + // "id": "compute.backendServices.listUsable", // "parameterOrder": [ - // "project", - // "backendService" + // "project" // ], // "parameters": { - // "backendService": { - // "description": "Name of the BackendService resource to patch.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. 
Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", // "type": "string" // }, // "project": { @@ -68361,51 +70566,70 @@ func (c *BackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, // "required": true, // "type": "string" // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", - // "type": "string" + // "type": "boolean" // } // }, - // "path": "projects/{project}/global/backendServices/{backendService}", - // "request": { - // "$ref": "BackendService" - // }, + // "path": "projects/{project}/global/backendServices/listUsable", // "response": { - // "$ref": "Operation" + // "$ref": "BackendServiceListUsable" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" // ] // } } -// method id "compute.backendServices.setEdgeSecurityPolicy": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
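// Illustrative sketch (not taken from this patch): Pages drives the maxResults/pageToken
// machinery documented above, invoking the callback once per page until NextPageToken is
// empty. "my-project" is a placeholder, and Items is assumed to be the payload field on
// the new BackendServiceListUsable type.
package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	err = svc.BackendServices.ListUsable("my-project").MaxResults(50).
		Pages(ctx, func(page *compute.BackendServiceListUsable) error {
			for _, bs := range page.Items { // Items: assumed list payload field
				fmt.Println(bs.Name)
			}
			return nil // returning a non-nil error stops the iteration early
		})
	if err != nil {
		log.Fatal(err)
	}
}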
+func (c *BackendServicesListUsableCall) Pages(ctx context.Context, f func(*BackendServiceListUsable) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type BackendServicesSetEdgeSecurityPolicyCall struct { - s *Service - project string - backendService string - securitypolicyreference *SecurityPolicyReference - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "compute.backendServices.patch": + +type BackendServicesPatchCall struct { + s *Service + project string + backendService string + backendservice *BackendService + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetEdgeSecurityPolicy: Sets the edge security policy for the -// specified backend service. +// Patch: Patches the specified BackendService resource with the data +// included in the request. For more information, see Backend services +// overview. This method supports PATCH semantics and uses the JSON +// merge patch format and processing rules. // -// - backendService: Name of the BackendService resource to which the -// edge security policy should be set. The name should conform to -// RFC1035. -// - project: Project ID for this request. -func (r *BackendServicesService) SetEdgeSecurityPolicy(project string, backendService string, securitypolicyreference *SecurityPolicyReference) *BackendServicesSetEdgeSecurityPolicyCall { - c := &BackendServicesSetEdgeSecurityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - backendService: Name of the BackendService resource to patch. +// - project: Project ID for this request. +func (r *BackendServicesService) Patch(project string, backendService string, backendservice *BackendService) *BackendServicesPatchCall { + c := &BackendServicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.backendService = backendService - c.securitypolicyreference = securitypolicyreference + c.backendservice = backendservice return c } @@ -68420,7 +70644,7 @@ func (r *BackendServicesService) SetEdgeSecurityPolicy(project string, backendSe // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *BackendServicesSetEdgeSecurityPolicyCall) RequestId(requestId string) *BackendServicesSetEdgeSecurityPolicyCall { +func (c *BackendServicesPatchCall) RequestId(requestId string) *BackendServicesPatchCall { c.urlParams_.Set("requestId", requestId) return c } @@ -68428,7 +70652,7 @@ func (c *BackendServicesSetEdgeSecurityPolicyCall) RequestId(requestId string) * // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BackendServicesSetEdgeSecurityPolicyCall) Fields(s ...googleapi.Field) *BackendServicesSetEdgeSecurityPolicyCall { +func (c *BackendServicesPatchCall) Fields(s ...googleapi.Field) *BackendServicesPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -68436,21 +70660,21 @@ func (c *BackendServicesSetEdgeSecurityPolicyCall) Fields(s ...googleapi.Field) // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BackendServicesSetEdgeSecurityPolicyCall) Context(ctx context.Context) *BackendServicesSetEdgeSecurityPolicyCall { +func (c *BackendServicesPatchCall) Context(ctx context.Context) *BackendServicesPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BackendServicesSetEdgeSecurityPolicyCall) Header() http.Header { +func (c *BackendServicesPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BackendServicesSetEdgeSecurityPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *BackendServicesPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -68458,16 +70682,16 @@ func (c *BackendServicesSetEdgeSecurityPolicyCall) doRequest(alt string) (*http. } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicyreference) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/backendServices/{backendService}/setEdgeSecurityPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/backendServices/{backendService}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } @@ -68479,14 +70703,193 @@ func (c *BackendServicesSetEdgeSecurityPolicyCall) doRequest(alt string) (*http. return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.backendServices.setEdgeSecurityPolicy" call. +// Do executes the "compute.backendServices.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *BackendServicesSetEdgeSecurityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *BackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Patches the specified BackendService resource with the data included in the request. For more information, see Backend services overview. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "flatPath": "projects/{project}/global/backendServices/{backendService}", + // "httpMethod": "PATCH", + // "id": "compute.backendServices.patch", + // "parameterOrder": [ + // "project", + // "backendService" + // ], + // "parameters": { + // "backendService": { + // "description": "Name of the BackendService resource to patch.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/backendServices/{backendService}", + // "request": { + // "$ref": "BackendService" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.backendServices.setEdgeSecurityPolicy": + +type BackendServicesSetEdgeSecurityPolicyCall struct { + s *Service + project string + backendService string + securitypolicyreference *SecurityPolicyReference + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetEdgeSecurityPolicy: Sets the edge security policy for the +// specified backend service. +// +// - backendService: Name of the BackendService resource to which the +// edge security policy should be set. The name should conform to +// RFC1035. +// - project: Project ID for this request. 
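// A minimal usage sketch for the generated SetEdgeSecurityPolicy builder documented
// above, assuming the standard compute.NewService constructor with default credentials;
// the project, backend-service, policy name, and request ID below are placeholders.
package main

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // Application Default Credentials (assumption)
	if err != nil {
		panic(err)
	}
	// Attach a (hypothetical) edge security policy to a backend service. RequestId
	// makes the mutation safe to retry, as the doc comment above describes.
	ref := &compute.SecurityPolicyReference{SecurityPolicy: "example-edge-policy"}
	op, err := svc.BackendServices.
		SetEdgeSecurityPolicy("example-project", "example-backend", ref).
		RequestId("00000000-0000-4000-8000-000000000001").
		Context(ctx).
		Do()
	if err != nil {
		panic(err)
	}
	fmt.Println("operation:", op.Name, op.Status) // poll until Status == "DONE" in real code
}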
+func (r *BackendServicesService) SetEdgeSecurityPolicy(project string, backendService string, securitypolicyreference *SecurityPolicyReference) *BackendServicesSetEdgeSecurityPolicyCall { + c := &BackendServicesSetEdgeSecurityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.backendService = backendService + c.securitypolicyreference = securitypolicyreference + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *BackendServicesSetEdgeSecurityPolicyCall) RequestId(requestId string) *BackendServicesSetEdgeSecurityPolicyCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendServicesSetEdgeSecurityPolicyCall) Fields(s ...googleapi.Field) *BackendServicesSetEdgeSecurityPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendServicesSetEdgeSecurityPolicyCall) Context(ctx context.Context) *BackendServicesSetEdgeSecurityPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendServicesSetEdgeSecurityPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendServicesSetEdgeSecurityPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicyreference) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/backendServices/{backendService}/setEdgeSecurityPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "backendService": c.backendService, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendServices.setEdgeSecurityPolicy" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BackendServicesSetEdgeSecurityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -68894,6 +71297,163 @@ func (c *BackendServicesSetSecurityPolicyCall) Do(opts ...googleapi.CallOption) } +// method id "compute.backendServices.testIamPermissions": + +type BackendServicesTestIamPermissionsCall struct { + s *Service + project string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +// +// - project: Project ID for this request. +// - resource: Name or id of the resource for this request. +func (r *BackendServicesService) TestIamPermissions(project string, resource string, testpermissionsrequest *TestPermissionsRequest) *BackendServicesTestIamPermissionsCall { + c := &BackendServicesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BackendServicesTestIamPermissionsCall) Fields(s ...googleapi.Field) *BackendServicesTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BackendServicesTestIamPermissionsCall) Context(ctx context.Context) *BackendServicesTestIamPermissionsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BackendServicesTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BackendServicesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/backendServices/{resource}/testIamPermissions") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.backendServices.testIamPermissions" call. 
+// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BackendServicesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &TestPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns permissions that a caller has on the specified resource.", + // "flatPath": "projects/{project}/global/backendServices/{resource}/testIamPermissions", + // "httpMethod": "POST", + // "id": "compute.backendServices.testIamPermissions", + // "parameterOrder": [ + // "project", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/backendServices/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, + // "response": { + // "$ref": "TestPermissionsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + // method id "compute.backendServices.update": type BackendServicesUpdateCall struct { @@ -69096,22 +71656,21 @@ func (r *DiskTypesService) AggregatedList(project string) *DiskTypesAggregatedLi // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. 
For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -69126,7 +71685,8 @@ func (r *DiskTypesService) AggregatedList(project string) *DiskTypesAggregatedLi // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *DiskTypesAggregatedListCall) Filter(filter string) *DiskTypesAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -69187,6 +71747,13 @@ func (c *DiskTypesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess return c } +// ServiceProjectNumber sets the optional parameter +// "serviceProjectNumber": +func (c *DiskTypesAggregatedListCall) ServiceProjectNumber(serviceProjectNumber int64) *DiskTypesAggregatedListCall { + c.urlParams_.Set("serviceProjectNumber", fmt.Sprint(serviceProjectNumber)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -69295,7 +71862,7 @@ func (c *DiskTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskTyp // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. 
For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -69333,6 +71900,11 @@ func (c *DiskTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskTyp // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" + // }, + // "serviceProjectNumber": { + // "format": "int64", + // "location": "query", + // "type": "string" // } // }, // "path": "projects/{project}/aggregated/diskTypes", @@ -69569,22 +72141,21 @@ func (r *DiskTypesService) List(project string, zone string) *DiskTypesListCall // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. 
To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -69599,7 +72170,8 @@ func (r *DiskTypesService) List(project string, zone string) *DiskTypesListCall // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *DiskTypesListCall) Filter(filter string) *DiskTypesListCall { c.urlParams_.Set("filter", filter) return c @@ -69757,7 +72329,7 @@ func (c *DiskTypesListCall) Do(opts ...googleapi.CallOption) (*DiskTypeList, err // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. 
Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -70047,22 +72619,21 @@ func (r *DisksService) AggregatedList(project string) *DisksAggregatedListCall { // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. 
For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -70077,7 +72648,8 @@ func (r *DisksService) AggregatedList(project string) *DisksAggregatedListCall { // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *DisksAggregatedListCall) Filter(filter string) *DisksAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -70138,6 +72710,13 @@ func (c *DisksAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool return c } +// ServiceProjectNumber sets the optional parameter +// "serviceProjectNumber": +func (c *DisksAggregatedListCall) ServiceProjectNumber(serviceProjectNumber int64) *DisksAggregatedListCall { + c.urlParams_.Set("serviceProjectNumber", fmt.Sprint(serviceProjectNumber)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -70246,7 +72825,7 @@ func (c *DisksAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskAggrega // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. 
You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -70284,6 +72863,11 @@ func (c *DisksAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskAggrega // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" + // }, + // "serviceProjectNumber": { + // "format": "int64", + // "location": "query", + // "type": "string" // } // }, // "path": "projects/{project}/aggregated/disks", @@ -71459,22 +74043,21 @@ func (r *DisksService) List(project string, zone string) *DisksListCall { // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -71489,7 +74072,8 @@ func (r *DisksService) List(project string, zone string) *DisksListCall { // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. 
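// A minimal sketch of the Filter parameter documented above, combined with the
// generated Pages helper; compute.NewService with default credentials and the
// project, zone, and filter values are placeholder assumptions.
package main

import (
	"context"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		panic(err)
	}
	// AIP-160 style expression: field, operator, value. Per the doc comment above,
	// this style cannot be mixed with eq/ne regular-expression filters in one request.
	call := svc.Disks.List("example-project", "us-central1-a").
		Filter(`name != example-disk`)
	err = call.Pages(ctx, func(page *compute.DiskList) error {
		for _, d := range page.Items {
			fmt.Println(d.Name, d.SizeGb)
		}
		return nil
	})
	if err != nil {
		panic(err)
	}
}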
func (c *DisksListCall) Filter(filter string) *DisksListCall { c.urlParams_.Set("filter", filter) return c @@ -71647,7 +74231,7 @@ func (c *DisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error) { // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -73905,22 +76489,21 @@ func (r *ExternalVpnGatewaysService) List(project string) *ExternalVpnGatewaysLi // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. 
You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -73935,7 +76518,8 @@ func (r *ExternalVpnGatewaysService) List(project string) *ExternalVpnGatewaysLi // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *ExternalVpnGatewaysListCall) Filter(filter string) *ExternalVpnGatewaysListCall { c.urlParams_.Set("filter", filter) return c @@ -74091,7 +76675,7 @@ func (c *ExternalVpnGatewaysListCall) Do(opts ...googleapi.CallOption) (*Externa // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. 
Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -75953,22 +78537,21 @@ func (r *FirewallPoliciesService) List() *FirewallPoliciesListCall { // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. 
For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -75983,7 +78566,8 @@ func (r *FirewallPoliciesService) List() *FirewallPoliciesListCall { // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *FirewallPoliciesListCall) Filter(filter string) *FirewallPoliciesListCall { c.urlParams_.Set("filter", filter) return c @@ -76142,7 +78726,7 @@ func (c *FirewallPoliciesListCall) Do(opts ...googleapi.CallOption) (*FirewallPo // "id": "compute.firewallPolicies.list", // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -78005,22 +80589,21 @@ func (r *FirewallsService) List(project string) *FirewallsListCall { // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. 
If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -78035,7 +80618,8 @@ func (r *FirewallsService) List(project string) *FirewallsListCall { // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *FirewallsListCall) Filter(filter string) *FirewallsListCall { c.urlParams_.Set("filter", filter) return c @@ -78191,7 +80775,7 @@ func (c *FirewallsListCall) Do(opts ...googleapi.CallOption) (*FirewallList, err // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. 
The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -78641,22 +81225,21 @@ func (r *ForwardingRulesService) AggregatedList(project string) *ForwardingRules // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -78671,7 +81254,8 @@ func (r *ForwardingRulesService) AggregatedList(project string) *ForwardingRules // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. 
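The generated doc comments above describe the two filter dialects these `Filter` setters accept: AIP-160 expressions (`=`, `!=`, `:`, `:*`, parenthesized `AND`/`OR` terms) and `eq`/`ne` regular-expression filters, which cannot be mixed in one request. As a minimal sketch of how a caller would pass such filters to one of the list calls touched in this file, assuming an authenticated *compute.Service created via Application Default Credentials (the project ID and filter field choices below are illustrative, not part of this change):

package main

import (
	"context"
	"fmt"
	"log"

	"google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()

	// Assumes Application Default Credentials are available in the environment.
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatalf("creating compute service: %v", err)
	}

	project := "my-project" // placeholder project ID

	// AIP-160 style filter: parenthesized expressions are ANDed by default.
	aipFilter := `(name != example-instance) (disabled = false)`

	// Regular-expression style filter: `ne` takes an RE2 literal that must
	// match the entire field (this is the example from the doc comment above).
	regexFilter := `name ne .*instance`

	for _, filter := range []string{aipFilter, regexFilter} {
		list, err := svc.Firewalls.List(project).Filter(filter).Context(ctx).Do()
		if err != nil {
			log.Fatalf("listing firewalls with filter %q: %v", filter, err)
		}
		fmt.Printf("filter %q matched %d firewalls\n", filter, len(list.Items))
	}
}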
func (c *ForwardingRulesAggregatedListCall) Filter(filter string) *ForwardingRulesAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -78732,6 +81316,13 @@ func (c *ForwardingRulesAggregatedListCall) ReturnPartialSuccess(returnPartialSu return c } +// ServiceProjectNumber sets the optional parameter +// "serviceProjectNumber": +func (c *ForwardingRulesAggregatedListCall) ServiceProjectNumber(serviceProjectNumber int64) *ForwardingRulesAggregatedListCall { + c.urlParams_.Set("serviceProjectNumber", fmt.Sprint(serviceProjectNumber)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -78840,7 +81431,7 @@ func (c *ForwardingRulesAggregatedListCall) Do(opts ...googleapi.CallOption) (*F // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. 
If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -78878,6 +81469,11 @@ func (c *ForwardingRulesAggregatedListCall) Do(opts ...googleapi.CallOption) (*F // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" + // }, + // "serviceProjectNumber": { + // "format": "int64", + // "location": "query", + // "type": "string" // } // }, // "path": "projects/{project}/aggregated/forwardingRules", @@ -79469,22 +82065,21 @@ func (r *ForwardingRulesService) List(project string, region string) *Forwarding // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. 
The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -79499,7 +82094,8 @@ func (r *ForwardingRulesService) List(project string, region string) *Forwarding // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *ForwardingRulesListCall) Filter(filter string) *ForwardingRulesListCall { c.urlParams_.Set("filter", filter) return c @@ -79657,7 +82253,7 @@ func (c *ForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingR // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -80818,22 +83414,21 @@ func (r *GlobalAddressesService) List(project string) *GlobalAddressesListCall { // filters resources listed in the response. 
Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -80848,7 +83443,8 @@ func (r *GlobalAddressesService) List(project string) *GlobalAddressesListCall { // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *GlobalAddressesListCall) Filter(filter string) *GlobalAddressesListCall { c.urlParams_.Set("filter", filter) return c @@ -81004,7 +83600,7 @@ func (c *GlobalAddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -81923,22 +84519,21 @@ func (r *GlobalForwardingRulesService) List(project string) *GlobalForwardingRul // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. 
For example: ``` @@ -81953,7 +84548,8 @@ func (r *GlobalForwardingRulesService) List(project string) *GlobalForwardingRul // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *GlobalForwardingRulesListCall) Filter(filter string) *GlobalForwardingRulesListCall { c.urlParams_.Set("filter", filter) return c @@ -82109,7 +84705,7 @@ func (c *GlobalForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*Forwa // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -83563,22 +86159,21 @@ func (r *GlobalNetworkEndpointGroupsService) List(project string) *GlobalNetwork // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. 
For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -83593,7 +86188,8 @@ func (r *GlobalNetworkEndpointGroupsService) List(project string) *GlobalNetwork // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *GlobalNetworkEndpointGroupsListCall) Filter(filter string) *GlobalNetworkEndpointGroupsListCall { c.urlParams_.Set("filter", filter) return c @@ -83749,7 +86345,7 @@ func (c *GlobalNetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -83847,22 +86443,21 @@ func (r *GlobalNetworkEndpointGroupsService) ListNetworkEndpoints(project string // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. 
The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -83877,7 +86472,8 @@ func (r *GlobalNetworkEndpointGroupsService) ListNetworkEndpoints(project string // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) Filter(filter string) *GlobalNetworkEndpointGroupsListNetworkEndpointsCall { c.urlParams_.Set("filter", filter) return c @@ -84024,7 +86620,7 @@ func (c *GlobalNetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googlea // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. 
You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -84123,22 +86719,21 @@ func (r *GlobalOperationsService) AggregatedList(project string) *GlobalOperatio // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -84153,7 +86748,8 @@ func (r *GlobalOperationsService) AggregatedList(project string) *GlobalOperatio // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. 
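For reviewers, a minimal usage sketch (not part of the vendored diff) of the AIP-160 filter syntax documented in the hunks above, run against the regenerated compute/v1 client. The project and zone values are placeholders, and credentials are assumed to come from Application Default Credentials.

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	// NewService picks up Application Default Credentials.
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// AIP-160 style filter taken from the documentation above:
	// parenthesized expressions are ANDed by default.
	filter := `(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`

	// "my-project" and "us-central1-a" are placeholder values.
	instances, err := svc.Instances.List("my-project", "us-central1-a").
		Filter(filter).
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	for _, inst := range instances.Items {
		fmt.Println(inst.Name)
	}
}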
func (c *GlobalOperationsAggregatedListCall) Filter(filter string) *GlobalOperationsAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -84214,6 +86810,13 @@ func (c *GlobalOperationsAggregatedListCall) ReturnPartialSuccess(returnPartialS return c } +// ServiceProjectNumber sets the optional parameter +// "serviceProjectNumber": +func (c *GlobalOperationsAggregatedListCall) ServiceProjectNumber(serviceProjectNumber int64) *GlobalOperationsAggregatedListCall { + c.urlParams_.Set("serviceProjectNumber", fmt.Sprint(serviceProjectNumber)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -84322,7 +86925,7 @@ func (c *GlobalOperationsAggregatedListCall) Do(opts ...googleapi.CallOption) (* // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. 
If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -84360,6 +86963,11 @@ func (c *GlobalOperationsAggregatedListCall) Do(opts ...googleapi.CallOption) (* // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" + // }, + // "serviceProjectNumber": { + // "format": "int64", + // "location": "query", + // "type": "string" // } // }, // "path": "projects/{project}/aggregated/operations", @@ -84698,22 +87306,21 @@ func (r *GlobalOperationsService) List(project string) *GlobalOperationsListCall // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. 
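A second sketch (again, not part of the vendored diff) showing the newly added ServiceProjectNumber setter alongside ReturnPartialSuccess on an aggregated operations list. It reuses svc and ctx from the sketch above; "my-project" and 1234567890 are placeholders, and the serviceProjectNumber parameter has no published description in this revision.

// Illustrative only: exercises the ServiceProjectNumber parameter introduced
// in this regeneration together with ReturnPartialSuccess.
func listAggregatedOperations(ctx context.Context, svc *compute.Service) error {
	agg, err := svc.GlobalOperations.AggregatedList("my-project").
		ServiceProjectNumber(1234567890). // placeholder value
		ReturnPartialSuccess(true).       // default is false
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	for scope, scoped := range agg.Items {
		for _, op := range scoped.Operations {
			fmt.Printf("%s: %s (%s)\n", scope, op.Name, op.Status)
		}
	}
	return nil
}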
The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -84728,7 +87335,8 @@ func (r *GlobalOperationsService) List(project string) *GlobalOperationsListCall // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *GlobalOperationsListCall) Filter(filter string) *GlobalOperationsListCall { c.urlParams_.Set("filter", filter) return c @@ -84884,7 +87492,7 @@ func (c *GlobalOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -85409,22 +88017,21 @@ func (r *GlobalOrganizationOperationsService) List() *GlobalOrganizationOperatio // filters resources listed in the response. 
Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -85439,7 +88046,8 @@ func (r *GlobalOrganizationOperationsService) List() *GlobalOrganizationOperatio // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *GlobalOrganizationOperationsListCall) Filter(filter string) *GlobalOrganizationOperationsListCall { c.urlParams_.Set("filter", filter) return c @@ -85596,7 +88204,7 @@ func (c *GlobalOrganizationOperationsListCall) Do(opts ...googleapi.CallOption) // "id": "compute.globalOrganizationOperations.list", // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. 
The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -86180,22 +88788,21 @@ func (r *GlobalPublicDelegatedPrefixesService) List(project string) *GlobalPubli // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. 
For example: ``` @@ -86210,7 +88817,8 @@ func (r *GlobalPublicDelegatedPrefixesService) List(project string) *GlobalPubli // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *GlobalPublicDelegatedPrefixesListCall) Filter(filter string) *GlobalPublicDelegatedPrefixesListCall { c.urlParams_.Set("filter", filter) return c @@ -86366,7 +88974,7 @@ func (c *GlobalPublicDelegatedPrefixesListCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -86639,22 +89247,21 @@ func (r *HealthChecksService) AggregatedList(project string) *HealthChecksAggreg // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. 
For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -86669,7 +89276,8 @@ func (r *HealthChecksService) AggregatedList(project string) *HealthChecksAggreg // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *HealthChecksAggregatedListCall) Filter(filter string) *HealthChecksAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -86730,6 +89338,13 @@ func (c *HealthChecksAggregatedListCall) ReturnPartialSuccess(returnPartialSucce return c } +// ServiceProjectNumber sets the optional parameter +// "serviceProjectNumber": +func (c *HealthChecksAggregatedListCall) ServiceProjectNumber(serviceProjectNumber int64) *HealthChecksAggregatedListCall { + c.urlParams_.Set("serviceProjectNumber", fmt.Sprint(serviceProjectNumber)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -86838,7 +89453,7 @@ func (c *HealthChecksAggregatedListCall) Do(opts ...googleapi.CallOption) (*Heal // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. 
However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -86876,6 +89491,11 @@ func (c *HealthChecksAggregatedListCall) Do(opts ...googleapi.CallOption) (*Heal // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", // "location": "query", // "type": "boolean" + // }, + // "serviceProjectNumber": { + // "format": "int64", + // "location": "query", + // "type": "string" // } // }, // "path": "projects/{project}/aggregated/healthChecks", @@ -87428,22 +90048,21 @@ func (r *HealthChecksService) List(project string) *HealthChecksListCall { // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -87458,7 +90077,8 @@ func (r *HealthChecksService) List(project string) *HealthChecksListCall { // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *HealthChecksListCall) Filter(filter string) *HealthChecksListCall { c.urlParams_.Set("filter", filter) return c @@ -87614,7 +90234,7 @@ func (c *HealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCheckLis // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. 
Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -88554,22 +91174,21 @@ func (r *HttpHealthChecksService) List(project string) *HttpHealthChecksListCall // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. 
However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -88584,7 +91203,8 @@ func (r *HttpHealthChecksService) List(project string) *HttpHealthChecksListCall // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *HttpHealthChecksListCall) Filter(filter string) *HttpHealthChecksListCall { c.urlParams_.Set("filter", filter) return c @@ -88740,7 +91360,7 @@ func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealth // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. 
The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -89680,22 +92300,21 @@ func (r *HttpsHealthChecksService) List(project string) *HttpsHealthChecksListCa // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. 
To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -89710,7 +92329,8 @@ func (r *HttpsHealthChecksService) List(project string) *HttpsHealthChecksListCa // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *HttpsHealthChecksListCall) Filter(filter string) *HttpsHealthChecksListCall { c.urlParams_.Set("filter", filter) return c @@ -89866,7 +92486,7 @@ func (c *HttpsHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpsHeal // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -91513,22 +94133,21 @@ func (r *ImagesService) List(project string) *ImagesListCall { // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. 
The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -91543,7 +94162,8 @@ func (r *ImagesService) List(project string) *ImagesListCall { // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *ImagesListCall) Filter(filter string) *ImagesListCall { c.urlParams_.Set("filter", filter) return c @@ -91699,7 +94319,7 @@ func (c *ImagesListCall) Do(opts ...googleapi.CallOption) (*ImageList, error) { // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. 
You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -92639,22 +95259,21 @@ func (r *InstanceGroupManagersService) AggregatedList(project string) *InstanceG // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -92669,7 +95288,8 @@ func (r *InstanceGroupManagersService) AggregatedList(project string) *InstanceG // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. 
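As context for the regenerated Filter documentation above, here is a minimal usage sketch in Go. It is illustrative only: the project ID ("my-project"), the filter strings, and the use of Application Default Credentials are assumptions, not part of this vendored code. It shows the two documented styles — an AIP-160 comparison and a legacy `eq`/`ne` RE2 expression — passed through the same Filter setter.

// Sketch: exercising the documented filter styles against an aggregated-list call.
// Project ID and filter values are placeholders.
package main

import (
	"context"
	"fmt"
	"log"

	"google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // assumes Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}

	// AIP-160 style: field, operator, value.
	aipFilter := `name != example-igm`
	// Legacy regex style: `eq`/`ne` with an RE2 literal that must match the entire field.
	regexFilter := `name ne .*-canary`

	for _, f := range []string{aipFilter, regexFilter} {
		resp, err := svc.InstanceGroupManagers.AggregatedList("my-project").
			Filter(f).
			MaxResults(50).
			Do()
		if err != nil {
			log.Fatalf("filter %q: %v", f, err)
		}
		fmt.Printf("filter %q returned %d scopes\n", f, len(resp.Items))
	}
}

Per the regenerated description, the two styles go through the same Filter(string) setter but cannot be mixed in a single request.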
func (c *InstanceGroupManagersAggregatedListCall) Filter(filter string) *InstanceGroupManagersAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -92730,6 +95350,13 @@ func (c *InstanceGroupManagersAggregatedListCall) ReturnPartialSuccess(returnPar return c } +// ServiceProjectNumber sets the optional parameter +// "serviceProjectNumber": +func (c *InstanceGroupManagersAggregatedListCall) ServiceProjectNumber(serviceProjectNumber int64) *InstanceGroupManagersAggregatedListCall { + c.urlParams_.Set("serviceProjectNumber", fmt.Sprint(serviceProjectNumber)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -92839,7 +95466,7 @@ func (c *InstanceGroupManagersAggregatedListCall) Do(opts ...googleapi.CallOptio // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. 
If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -92877,6 +95504,11 @@ func (c *InstanceGroupManagersAggregatedListCall) Do(opts ...googleapi.CallOptio // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" + // }, + // "serviceProjectNumber": { + // "format": "int64", + // "location": "query", + // "type": "string" // } // }, // "path": "projects/{project}/aggregated/instanceGroupManagers", @@ -94206,22 +96838,21 @@ func (r *InstanceGroupManagersService) List(project string, zone string) *Instan // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. 
The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -94236,7 +96867,8 @@ func (r *InstanceGroupManagersService) List(project string, zone string) *Instan // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *InstanceGroupManagersListCall) Filter(filter string) *InstanceGroupManagersListCall { c.urlParams_.Set("filter", filter) return c @@ -94394,7 +97026,7 @@ func (c *InstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*Insta // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -94505,22 +97137,21 @@ func (r *InstanceGroupManagersService) ListErrors(project string, zone string, i // filters resources listed in the response. 
Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -94535,7 +97166,8 @@ func (r *InstanceGroupManagersService) ListErrors(project string, zone string, i // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *InstanceGroupManagersListErrorsCall) Filter(filter string) *InstanceGroupManagersListErrorsCall { c.urlParams_.Set("filter", filter) return c @@ -94696,7 +97328,7 @@ func (c *InstanceGroupManagersListErrorsCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -94815,22 +97447,21 @@ func (r *InstanceGroupManagersService) ListManagedInstances(project string, zone // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. 
For example: ``` @@ -94845,7 +97476,8 @@ func (r *InstanceGroupManagersService) ListManagedInstances(project string, zone // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *InstanceGroupManagersListManagedInstancesCall) Filter(filter string) *InstanceGroupManagersListManagedInstancesCall { c.urlParams_.Set("filter", filter) return c @@ -94994,7 +97626,7 @@ func (c *InstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.Cal // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. 
The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -95108,22 +97740,21 @@ func (r *InstanceGroupManagersService) ListPerInstanceConfigs(project string, zo // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. 
To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -95138,7 +97769,8 @@ func (r *InstanceGroupManagersService) ListPerInstanceConfigs(project string, zo // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *InstanceGroupManagersListPerInstanceConfigsCall) Filter(filter string) *InstanceGroupManagersListPerInstanceConfigsCall { c.urlParams_.Set("filter", filter) return c @@ -95289,7 +97921,7 @@ func (c *InstanceGroupManagersListPerInstanceConfigsCall) Do(opts ...googleapi.C // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -96949,22 +99581,21 @@ func (r *InstanceGroupsService) AggregatedList(project string) *InstanceGroupsAg // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. 
The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -96979,7 +99610,8 @@ func (r *InstanceGroupsService) AggregatedList(project string) *InstanceGroupsAg // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *InstanceGroupsAggregatedListCall) Filter(filter string) *InstanceGroupsAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -97040,6 +99672,13 @@ func (c *InstanceGroupsAggregatedListCall) ReturnPartialSuccess(returnPartialSuc return c } +// ServiceProjectNumber sets the optional parameter +// "serviceProjectNumber": +func (c *InstanceGroupsAggregatedListCall) ServiceProjectNumber(serviceProjectNumber int64) *InstanceGroupsAggregatedListCall { + c.urlParams_.Set("serviceProjectNumber", fmt.Sprint(serviceProjectNumber)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -97148,7 +99787,7 @@ func (c *InstanceGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*In // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -97186,6 +99825,11 @@ func (c *InstanceGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*In // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" + // }, + // "serviceProjectNumber": { + // "format": "int64", + // "location": "query", + // "type": "string" // } // }, // "path": "projects/{project}/aggregated/instanceGroups", @@ -97780,22 +100424,21 @@ func (r *InstanceGroupsService) List(project string, zone string) *InstanceGroup // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. 
To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -97810,7 +100453,8 @@ func (r *InstanceGroupsService) List(project string, zone string) *InstanceGroup // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *InstanceGroupsListCall) Filter(filter string) *InstanceGroupsListCall { c.urlParams_.Set("filter", filter) return c @@ -97968,7 +100612,7 @@ func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGrou // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. 
Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -98078,22 +100722,21 @@ func (r *InstanceGroupsService) ListInstances(project string, zone string, insta // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. 
For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -98108,7 +100751,8 @@ func (r *InstanceGroupsService) ListInstances(project string, zone string, insta // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *InstanceGroupsListInstancesCall) Filter(filter string) *InstanceGroupsListInstancesCall { c.urlParams_.Set("filter", filter) return c @@ -98260,7 +100904,7 @@ func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*Ins // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -98747,22 +101391,21 @@ func (r *InstanceTemplatesService) AggregatedList(project string) *InstanceTempl // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. 
If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -98777,7 +101420,8 @@ func (r *InstanceTemplatesService) AggregatedList(project string) *InstanceTempl // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *InstanceTemplatesAggregatedListCall) Filter(filter string) *InstanceTemplatesAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -98838,6 +101482,13 @@ func (c *InstanceTemplatesAggregatedListCall) ReturnPartialSuccess(returnPartial return c } +// ServiceProjectNumber sets the optional parameter +// "serviceProjectNumber": +func (c *InstanceTemplatesAggregatedListCall) ServiceProjectNumber(serviceProjectNumber int64) *InstanceTemplatesAggregatedListCall { + c.urlParams_.Set("serviceProjectNumber", fmt.Sprint(serviceProjectNumber)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
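Editor's note (not part of the patch): the regenerated surface above adds a `ServiceProjectNumber` option next to the reworded `Filter` documentation on the aggregated-list calls. The sketch below shows how a caller might combine them on `InstanceTemplatesAggregatedListCall`; it is illustrative only. The project ID, filter string, and service project number are placeholders, and it assumes Application Default Credentials via `compute.NewService` plus the generated `Pages` helper and `InstanceTemplateAggregatedList` response type, which are not shown in this hunk.

```go
// Minimal sketch; assumes google.golang.org/api/compute/v1 as regenerated here.
package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()

	svc, err := compute.NewService(ctx) // uses Application Default Credentials
	if err != nil {
		log.Fatalf("creating compute service: %v", err)
	}

	// "my-project" and 1234567890 are placeholders. The filter is an
	// AIP-160 style expression; per the updated description it must not be
	// mixed with `eq`/`ne` regular-expression filters in the same request.
	call := svc.InstanceTemplates.AggregatedList("my-project").
		Filter(`name != example-template`).
		ServiceProjectNumber(1234567890).
		ReturnPartialSuccess(true)

	err = call.Pages(ctx, func(page *compute.InstanceTemplateAggregatedList) error {
		for scope, scoped := range page.Items {
			fmt.Printf("%s: %d templates\n", scope, len(scoped.InstanceTemplates))
		}
		return nil
	})
	if err != nil {
		log.Fatalf("listing instance templates: %v", err)
	}
}
```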
@@ -98946,7 +101597,7 @@ func (c *InstanceTemplatesAggregatedListCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -98984,6 +101635,11 @@ func (c *InstanceTemplatesAggregatedListCall) Do(opts ...googleapi.CallOption) ( // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" + // }, + // "serviceProjectNumber": { + // "format": "int64", + // "location": "query", + // "type": "string" // } // }, // "path": "projects/{project}/aggregated/instanceTemplates", @@ -99715,22 +102371,21 @@ func (r *InstanceTemplatesService) List(project string) *InstanceTemplatesListCa // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. 
The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -99745,7 +102400,8 @@ func (r *InstanceTemplatesService) List(project string) *InstanceTemplatesListCa // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *InstanceTemplatesListCall) Filter(filter string) *InstanceTemplatesListCall { c.urlParams_.Set("filter", filter) return c @@ -99901,7 +102557,7 @@ func (c *InstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*InstanceT // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -100699,22 +103355,21 @@ func (r *InstancesService) AggregatedList(project string) *InstancesAggregatedLi // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. 
The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -100729,7 +103384,8 @@ func (r *InstancesService) AggregatedList(project string) *InstancesAggregatedLi // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *InstancesAggregatedListCall) Filter(filter string) *InstancesAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -100790,6 +103446,13 @@ func (c *InstancesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess return c } +// ServiceProjectNumber sets the optional parameter +// "serviceProjectNumber": +func (c *InstancesAggregatedListCall) ServiceProjectNumber(serviceProjectNumber int64) *InstancesAggregatedListCall { + c.urlParams_.Set("serviceProjectNumber", fmt.Sprint(serviceProjectNumber)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -100898,7 +103561,7 @@ func (c *InstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Instanc // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -100936,6 +103599,11 @@ func (c *InstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Instanc // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" + // }, + // "serviceProjectNumber": { + // "format": "int64", + // "location": "query", + // "type": "string" // } // }, // "path": "projects/{project}/aggregated/instances", @@ -103457,22 +106125,21 @@ func (r *InstancesService) List(project string, zone string) *InstancesListCall // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. 
To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -103487,7 +106154,8 @@ func (r *InstancesService) List(project string, zone string) *InstancesListCall // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *InstancesListCall) Filter(filter string) *InstancesListCall { c.urlParams_.Set("filter", filter) return c @@ -103645,7 +106313,7 @@ func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, err // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. 
Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -103756,22 +106424,21 @@ func (r *InstancesService) ListReferrers(project string, zone string, instance s // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. 
For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -103786,7 +106453,8 @@ func (r *InstancesService) ListReferrers(project string, zone string, instance s // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *InstancesListReferrersCall) Filter(filter string) *InstancesListReferrersCall { c.urlParams_.Set("filter", filter) return c @@ -103946,7 +106614,7 @@ func (c *InstancesListReferrersCall) Do(opts ...googleapi.CallOption) (*Instance // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. 
You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -106593,32 +109261,33 @@ func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operatio } -// method id "compute.instances.setServiceAccount": +// method id "compute.instances.setSecurityPolicy": -type InstancesSetServiceAccountCall struct { +type InstancesSetSecurityPolicyCall struct { s *Service project string zone string instance string - instancessetserviceaccountrequest *InstancesSetServiceAccountRequest + instancessetsecuritypolicyrequest *InstancesSetSecurityPolicyRequest urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// SetServiceAccount: Sets the service account on the instance. For more -// information, read Changing the service account and access scopes for -// an instance. +// SetSecurityPolicy: Sets the Google Cloud Armor security policy for +// the specified instance. For more information, see Google Cloud Armor +// Overview // -// - instance: Name of the instance resource to start. -// - project: Project ID for this request. -// - zone: The name of the zone for this request. -func (r *InstancesService) SetServiceAccount(project string, zone string, instance string, instancessetserviceaccountrequest *InstancesSetServiceAccountRequest) *InstancesSetServiceAccountCall { - c := &InstancesSetServiceAccountCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - instance: Name of the Instance resource to which the security +// policy should be set. The name should conform to RFC1035. +// - project: Project ID for this request. +// - zone: Name of the zone scoping this request. +func (r *InstancesService) SetSecurityPolicy(project string, zone string, instance string, instancessetsecuritypolicyrequest *InstancesSetSecurityPolicyRequest) *InstancesSetSecurityPolicyCall { + c := &InstancesSetSecurityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance - c.instancessetserviceaccountrequest = instancessetserviceaccountrequest + c.instancessetsecuritypolicyrequest = instancessetsecuritypolicyrequest return c } @@ -106633,7 +109302,7 @@ func (r *InstancesService) SetServiceAccount(project string, zone string, instan // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *InstancesSetServiceAccountCall) RequestId(requestId string) *InstancesSetServiceAccountCall { +func (c *InstancesSetSecurityPolicyCall) RequestId(requestId string) *InstancesSetSecurityPolicyCall { c.urlParams_.Set("requestId", requestId) return c } @@ -106641,7 +109310,7 @@ func (c *InstancesSetServiceAccountCall) RequestId(requestId string) *InstancesS // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetServiceAccountCall) Fields(s ...googleapi.Field) *InstancesSetServiceAccountCall { +func (c *InstancesSetSecurityPolicyCall) Fields(s ...googleapi.Field) *InstancesSetSecurityPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -106649,21 +109318,21 @@ func (c *InstancesSetServiceAccountCall) Fields(s ...googleapi.Field) *Instances // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
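Not part of the vendored diff itself: the repeated Filter documentation above distinguishes AIP-160 expressions from `eq`/`ne` regular-expression filters and notes that the two styles cannot be mixed in one request. The sketch below illustrates how a caller might pass each style to the generated ListReferrers call; it assumes the v1 surface of google.golang.org/api/compute/v1, Application Default Credentials, and placeholder project/zone/instance values.

	package main

	import (
		"context"
		"fmt"
		"log"

		compute "google.golang.org/api/compute/v1"
	)

	func main() {
		ctx := context.Background()
		svc, err := compute.NewService(ctx) // uses Application Default Credentials
		if err != nil {
			log.Fatal(err)
		}
		project, zone, instance := "my-project", "us-central1-a", "my-instance" // placeholders

		// AIP-160 style: field name, operator, value.
		refs, err := svc.Instances.ListReferrers(project, zone, instance).
			Filter(`name != example-instance`).
			Do()
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(len(refs.Items))

		// Regular-expression style: the literal must match the entire field.
		refs, err = svc.Instances.ListReferrers(project, zone, instance).
			Filter(`name ne .*instance`).
			Do()
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(len(refs.Items))
	}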
-func (c *InstancesSetServiceAccountCall) Context(ctx context.Context) *InstancesSetServiceAccountCall { +func (c *InstancesSetSecurityPolicyCall) Context(ctx context.Context) *InstancesSetSecurityPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSetServiceAccountCall) Header() http.Header { +func (c *InstancesSetSecurityPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetServiceAccountCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetSecurityPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -106671,14 +109340,14 @@ func (c *InstancesSetServiceAccountCall) doRequest(alt string) (*http.Response, } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetserviceaccountrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetsecuritypolicyrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setServiceAccount") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setSecurityPolicy") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -106693,14 +109362,14 @@ func (c *InstancesSetServiceAccountCall) doRequest(alt string) (*http.Response, return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setServiceAccount" call. +// Do executes the "compute.instances.setSecurityPolicy" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesSetSecurityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -106731,10 +109400,10 @@ func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Sets the service account on the instance. For more information, read Changing the service account and access scopes for an instance.", - // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setServiceAccount", + // "description": "Sets the Google Cloud Armor security policy for the specified instance. 
For more information, see Google Cloud Armor Overview", + // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setSecurityPolicy", // "httpMethod": "POST", - // "id": "compute.instances.setServiceAccount", + // "id": "compute.instances.setSecurityPolicy", // "parameterOrder": [ // "project", // "zone", @@ -106742,9 +109411,8 @@ func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Oper // ], // "parameters": { // "instance": { - // "description": "Name of the instance resource to start.", + // "description": "Name of the Instance resource to which the security policy should be set. The name should conform to RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, @@ -106761,16 +109429,16 @@ func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Oper // "type": "string" // }, // "zone": { - // "description": "The name of the zone for this request.", + // "description": "Name of the zone scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instances/{instance}/setServiceAccount", + // "path": "projects/{project}/zones/{zone}/instances/{instance}/setSecurityPolicy", // "request": { - // "$ref": "InstancesSetServiceAccountRequest" + // "$ref": "InstancesSetSecurityPolicyRequest" // }, // "response": { // "$ref": "Operation" @@ -106783,33 +109451,32 @@ func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Oper } -// method id "compute.instances.setShieldedInstanceIntegrityPolicy": +// method id "compute.instances.setServiceAccount": -type InstancesSetShieldedInstanceIntegrityPolicyCall struct { - s *Service - project string - zone string - instance string - shieldedinstanceintegritypolicy *ShieldedInstanceIntegrityPolicy - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesSetServiceAccountCall struct { + s *Service + project string + zone string + instance string + instancessetserviceaccountrequest *InstancesSetServiceAccountRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetShieldedInstanceIntegrityPolicy: Sets the Shielded Instance -// integrity policy for an instance. You can only use this method on a -// running instance. This method supports PATCH semantics and uses the -// JSON merge patch format and processing rules. +// SetServiceAccount: Sets the service account on the instance. For more +// information, read Changing the service account and access scopes for +// an instance. // -// - instance: Name or id of the instance scoping this request. +// - instance: Name of the instance resource to start. // - project: Project ID for this request. // - zone: The name of the zone for this request. 
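Illustrative only, with the same imports and assumptions as the earlier sketch: the setSecurityPolicy method introduced earlier in this hunk takes an InstancesSetSecurityPolicyRequest body and returns a long-running Operation. The request fields are left empty here because they are not shown in this hunk; consult the API reference for the concrete field names.

	// setInstanceSecurityPolicy sketches the new compute.instances.setSecurityPolicy
	// call added by this vendor update and returns the Operation handle.
	func setInstanceSecurityPolicy(ctx context.Context, svc *compute.Service, project, zone, instance string) (*compute.Operation, error) {
		req := &compute.InstancesSetSecurityPolicyRequest{
			// Populate the security-policy fields per the API reference;
			// they are intentionally omitted in this sketch.
		}
		return svc.Instances.
			SetSecurityPolicy(project, zone, instance, req).
			Context(ctx).
			Do()
	}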
-func (r *InstancesService) SetShieldedInstanceIntegrityPolicy(project string, zone string, instance string, shieldedinstanceintegritypolicy *ShieldedInstanceIntegrityPolicy) *InstancesSetShieldedInstanceIntegrityPolicyCall { - c := &InstancesSetShieldedInstanceIntegrityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *InstancesService) SetServiceAccount(project string, zone string, instance string, instancessetserviceaccountrequest *InstancesSetServiceAccountRequest) *InstancesSetServiceAccountCall { + c := &InstancesSetServiceAccountCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance - c.shieldedinstanceintegritypolicy = shieldedinstanceintegritypolicy + c.instancessetserviceaccountrequest = instancessetserviceaccountrequest return c } @@ -106824,7 +109491,7 @@ func (r *InstancesService) SetShieldedInstanceIntegrityPolicy(project string, zo // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) RequestId(requestId string) *InstancesSetShieldedInstanceIntegrityPolicyCall { +func (c *InstancesSetServiceAccountCall) RequestId(requestId string) *InstancesSetServiceAccountCall { c.urlParams_.Set("requestId", requestId) return c } @@ -106832,7 +109499,7 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) RequestId(requestId st // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Fields(s ...googleapi.Field) *InstancesSetShieldedInstanceIntegrityPolicyCall { +func (c *InstancesSetServiceAccountCall) Fields(s ...googleapi.Field) *InstancesSetServiceAccountCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -106840,21 +109507,21 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Fields(s ...googleapi. // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Context(ctx context.Context) *InstancesSetShieldedInstanceIntegrityPolicyCall { +func (c *InstancesSetServiceAccountCall) Context(ctx context.Context) *InstancesSetServiceAccountCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Header() http.Header { +func (c *InstancesSetServiceAccountCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetServiceAccountCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -106862,16 +109529,16 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) doRequest(alt string) } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.shieldedinstanceintegritypolicy) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetserviceaccountrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setServiceAccount") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } @@ -106884,14 +109551,14 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) doRequest(alt string) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setShieldedInstanceIntegrityPolicy" call. +// Do executes the "compute.instances.setServiceAccount" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -106922,10 +109589,10 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Do(opts ...googleapi.C } return ret, nil // { - // "description": "Sets the Shielded Instance integrity policy for an instance. You can only use this method on a running instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy", - // "httpMethod": "PATCH", - // "id": "compute.instances.setShieldedInstanceIntegrityPolicy", + // "description": "Sets the service account on the instance. 
For more information, read Changing the service account and access scopes for an instance.", + // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setServiceAccount", + // "httpMethod": "POST", + // "id": "compute.instances.setServiceAccount", // "parameterOrder": [ // "project", // "zone", @@ -106933,7 +109600,7 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Do(opts ...googleapi.C // ], // "parameters": { // "instance": { - // "description": "Name or id of the instance scoping this request.", + // "description": "Name of the instance resource to start.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -106959,9 +109626,9 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Do(opts ...googleapi.C // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy", + // "path": "projects/{project}/zones/{zone}/instances/{instance}/setServiceAccount", // "request": { - // "$ref": "ShieldedInstanceIntegrityPolicy" + // "$ref": "InstancesSetServiceAccountRequest" // }, // "response": { // "$ref": "Operation" @@ -106974,31 +109641,33 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Do(opts ...googleapi.C } -// method id "compute.instances.setTags": +// method id "compute.instances.setShieldedInstanceIntegrityPolicy": -type InstancesSetTagsCall struct { - s *Service - project string - zone string - instance string - tags *Tags - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesSetShieldedInstanceIntegrityPolicyCall struct { + s *Service + project string + zone string + instance string + shieldedinstanceintegritypolicy *ShieldedInstanceIntegrityPolicy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetTags: Sets network tags for the specified instance to the data -// included in the request. +// SetShieldedInstanceIntegrityPolicy: Sets the Shielded Instance +// integrity policy for an instance. You can only use this method on a +// running instance. This method supports PATCH semantics and uses the +// JSON merge patch format and processing rules. // -// - instance: Name of the instance scoping this request. +// - instance: Name or id of the instance scoping this request. // - project: Project ID for this request. // - zone: The name of the zone for this request. -func (r *InstancesService) SetTags(project string, zone string, instance string, tags *Tags) *InstancesSetTagsCall { - c := &InstancesSetTagsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *InstancesService) SetShieldedInstanceIntegrityPolicy(project string, zone string, instance string, shieldedinstanceintegritypolicy *ShieldedInstanceIntegrityPolicy) *InstancesSetShieldedInstanceIntegrityPolicyCall { + c := &InstancesSetShieldedInstanceIntegrityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance - c.tags = tags + c.shieldedinstanceintegritypolicy = shieldedinstanceintegritypolicy return c } @@ -107013,7 +109682,7 @@ func (r *InstancesService) SetTags(project string, zone string, instance string, // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). 
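Purely as an illustration of the RequestId comment above (repeated for each mutating call): a caller can attach an idempotency key so a retried request with the same ID is ignored by the server. This sketch assumes github.com/google/uuid for ID generation plus the same svc and placeholder values as the earlier sketches.

	// withRequestID attaches an idempotency key to a mutating call, as described
	// in the RequestId comment above; reuse the same ID when retrying this call.
	func withRequestID(ctx context.Context, svc *compute.Service, project, zone, instance string, policy *compute.ShieldedInstanceIntegrityPolicy) (*compute.Operation, error) {
		requestID := uuid.NewString() // from github.com/google/uuid; must not be the zero UUID
		return svc.Instances.
			SetShieldedInstanceIntegrityPolicy(project, zone, instance, policy).
			RequestId(requestID).
			Context(ctx).
			Do()
	}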
-func (c *InstancesSetTagsCall) RequestId(requestId string) *InstancesSetTagsCall { +func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) RequestId(requestId string) *InstancesSetShieldedInstanceIntegrityPolicyCall { c.urlParams_.Set("requestId", requestId) return c } @@ -107021,7 +109690,7 @@ func (c *InstancesSetTagsCall) RequestId(requestId string) *InstancesSetTagsCall // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetTagsCall) Fields(s ...googleapi.Field) *InstancesSetTagsCall { +func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Fields(s ...googleapi.Field) *InstancesSetShieldedInstanceIntegrityPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -107029,21 +109698,21 @@ func (c *InstancesSetTagsCall) Fields(s ...googleapi.Field) *InstancesSetTagsCal // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetTagsCall) Context(ctx context.Context) *InstancesSetTagsCall { +func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Context(ctx context.Context) *InstancesSetShieldedInstanceIntegrityPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSetTagsCall) Header() http.Header { +func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetTagsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -107051,16 +109720,16 @@ func (c *InstancesSetTagsCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.tags) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.shieldedinstanceintegritypolicy) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setTags") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } @@ -107073,14 +109742,14 @@ func (c *InstancesSetTagsCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setTags" call. +// Do executes the "compute.instances.setShieldedInstanceIntegrityPolicy" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -107111,10 +109780,10 @@ func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, err } return ret, nil // { - // "description": "Sets network tags for the specified instance to the data included in the request.", - // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setTags", - // "httpMethod": "POST", - // "id": "compute.instances.setTags", + // "description": "Sets the Shielded Instance integrity policy for an instance. You can only use this method on a running instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy", + // "httpMethod": "PATCH", + // "id": "compute.instances.setShieldedInstanceIntegrityPolicy", // "parameterOrder": [ // "project", // "zone", @@ -107122,7 +109791,7 @@ func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, err // ], // "parameters": { // "instance": { - // "description": "Name of the instance scoping this request.", + // "description": "Name or id of the instance scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -107148,9 +109817,9 @@ func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, err // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instances/{instance}/setTags", + // "path": "projects/{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy", // "request": { - // "$ref": "Tags" + // "$ref": "ShieldedInstanceIntegrityPolicy" // }, // "response": { // "$ref": "Operation" @@ -107163,29 +109832,31 @@ func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, err } -// method id "compute.instances.simulateMaintenanceEvent": +// method id "compute.instances.setTags": -type InstancesSimulateMaintenanceEventCall struct { +type InstancesSetTagsCall struct { s *Service project string zone string instance string + tags *Tags urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// SimulateMaintenanceEvent: Simulates a host maintenance event on a VM. -// For more information, see Simulate a host maintenance event. +// SetTags: Sets network tags for the specified instance to the data +// included in the request. // // - instance: Name of the instance scoping this request. // - project: Project ID for this request. // - zone: The name of the zone for this request. 
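Again illustrative only: a sketch of the SetTags call documented above, replacing the instance's network tags with exactly the items in the request. It assumes the same svc and placeholders as before; the Items and Fingerprint fields are taken from the generated Tags type and should be verified against the vendored code.

	// replaceNetworkTags sketches compute.instances.setTags; the fingerprint is
	// the current tags fingerprint previously read from the instance.
	func replaceNetworkTags(ctx context.Context, svc *compute.Service, project, zone, instance, fingerprint string) (*compute.Operation, error) {
		tags := &compute.Tags{
			Items:       []string{"allow-health-checks", "web"}, // placeholder tag values
			Fingerprint: fingerprint,
		}
		return svc.Instances.
			SetTags(project, zone, instance, tags).
			Context(ctx).
			Do()
	}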
-func (r *InstancesService) SimulateMaintenanceEvent(project string, zone string, instance string) *InstancesSimulateMaintenanceEventCall { - c := &InstancesSimulateMaintenanceEventCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *InstancesService) SetTags(project string, zone string, instance string, tags *Tags) *InstancesSetTagsCall { + c := &InstancesSetTagsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance + c.tags = tags return c } @@ -107200,7 +109871,7 @@ func (r *InstancesService) SimulateMaintenanceEvent(project string, zone string, // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *InstancesSimulateMaintenanceEventCall) RequestId(requestId string) *InstancesSimulateMaintenanceEventCall { +func (c *InstancesSetTagsCall) RequestId(requestId string) *InstancesSetTagsCall { c.urlParams_.Set("requestId", requestId) return c } @@ -107208,7 +109879,7 @@ func (c *InstancesSimulateMaintenanceEventCall) RequestId(requestId string) *Ins // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSimulateMaintenanceEventCall) Fields(s ...googleapi.Field) *InstancesSimulateMaintenanceEventCall { +func (c *InstancesSetTagsCall) Fields(s ...googleapi.Field) *InstancesSetTagsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -107216,21 +109887,21 @@ func (c *InstancesSimulateMaintenanceEventCall) Fields(s ...googleapi.Field) *In // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSimulateMaintenanceEventCall) Context(ctx context.Context) *InstancesSimulateMaintenanceEventCall { +func (c *InstancesSetTagsCall) Context(ctx context.Context) *InstancesSetTagsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSimulateMaintenanceEventCall) Header() http.Header { +func (c *InstancesSetTagsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSimulateMaintenanceEventCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetTagsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -107238,9 +109909,14 @@ func (c *InstancesSimulateMaintenanceEventCall) doRequest(alt string) (*http.Res } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.tags) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setTags") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -107255,14 +109931,196 @@ func (c *InstancesSimulateMaintenanceEventCall) doRequest(alt string) (*http.Res return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.simulateMaintenanceEvent" call. +// Do executes the "compute.instances.setTags" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets network tags for the specified instance to the data included in the request.", + // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setTags", + // "httpMethod": "POST", + // "id": "compute.instances.setTags", + // "parameterOrder": [ + // "project", + // "zone", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "Name of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/zones/{zone}/instances/{instance}/setTags", + // "request": { + // "$ref": "Tags" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instances.simulateMaintenanceEvent": + +type InstancesSimulateMaintenanceEventCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SimulateMaintenanceEvent: Simulates a host maintenance event on a VM. +// For more information, see Simulate a host maintenance event. +// +// - instance: Name of the instance scoping this request. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. +func (r *InstancesService) SimulateMaintenanceEvent(project string, zone string, instance string) *InstancesSimulateMaintenanceEventCall { + c := &InstancesSimulateMaintenanceEventCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *InstancesSimulateMaintenanceEventCall) RequestId(requestId string) *InstancesSimulateMaintenanceEventCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesSimulateMaintenanceEventCall) Fields(s ...googleapi.Field) *InstancesSimulateMaintenanceEventCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesSimulateMaintenanceEventCall) Context(ctx context.Context) *InstancesSimulateMaintenanceEventCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
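Illustrative only, under the same assumptions as the earlier sketches: the newly generated SimulateMaintenanceEvent call sends no request body, so triggering it is a single chained call that returns a long-running Operation.

	// simulateMaintenance sketches compute.instances.simulateMaintenanceEvent.
	func simulateMaintenance(ctx context.Context, svc *compute.Service, project, zone, instance string) (*compute.Operation, error) {
		return svc.Instances.
			SimulateMaintenanceEvent(project, zone, instance).
			Context(ctx).
			Do()
	}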
+func (c *InstancesSimulateMaintenanceEventCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstancesSimulateMaintenanceEventCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instances.simulateMaintenanceEvent" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -109343,22 +112201,21 @@ func (r *InterconnectAttachmentsService) AggregatedList(project string) *Interco // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. 
The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -109373,7 +112230,8 @@ func (r *InterconnectAttachmentsService) AggregatedList(project string) *Interco // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *InterconnectAttachmentsAggregatedListCall) Filter(filter string) *InterconnectAttachmentsAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -109434,6 +112292,13 @@ func (c *InterconnectAttachmentsAggregatedListCall) ReturnPartialSuccess(returnP return c } +// ServiceProjectNumber sets the optional parameter +// "serviceProjectNumber": +func (c *InterconnectAttachmentsAggregatedListCall) ServiceProjectNumber(serviceProjectNumber int64) *InterconnectAttachmentsAggregatedListCall { + c.urlParams_.Set("serviceProjectNumber", fmt.Sprint(serviceProjectNumber)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -109543,7 +112408,7 @@ func (c *InterconnectAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOpt // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. 
However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -109581,6 +112446,11 @@ func (c *InterconnectAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOpt // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", // "location": "query", // "type": "boolean" + // }, + // "serviceProjectNumber": { + // "format": "int64", + // "location": "query", + // "type": "string" // } // }, // "path": "projects/{project}/aggregated/interconnectAttachments", @@ -110186,22 +113056,21 @@ func (r *InterconnectAttachmentsService) List(project string, region string) *In // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -110216,7 +113085,8 @@ func (r *InterconnectAttachmentsService) List(project string, region string) *In // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. 
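Also illustrative only: this vendor bump adds a ServiceProjectNumber setter (with no description in the discovery document) to the InterconnectAttachments aggregated-list call. A hypothetical caller might combine it with a filter and page through results as below; the InterconnectAttachmentAggregatedList type name and the Pages helper follow the usual generated-client pattern and should be verified against the vendored code.

	// listAttachments sketches the aggregated InterconnectAttachments listing
	// using the newly added serviceProjectNumber query parameter.
	func listAttachments(ctx context.Context, svc *compute.Service, project string, serviceProjectNumber int64) error {
		call := svc.InterconnectAttachments.AggregatedList(project).
			Filter(`name != example-attachment`).
			ServiceProjectNumber(serviceProjectNumber)
		return call.Pages(ctx, func(page *compute.InterconnectAttachmentAggregatedList) error {
			for scope, scoped := range page.Items {
				_ = scope  // e.g. a per-region key
				_ = scoped // holds the per-scope attachment list
			}
			return nil
		})
	}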
func (c *InterconnectAttachmentsListCall) Filter(filter string) *InterconnectAttachmentsListCall { c.urlParams_.Set("filter", filter) return c @@ -110374,7 +113244,7 @@ func (c *InterconnectAttachmentsListCall) Do(opts ...googleapi.CallOption) (*Int // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -111017,22 +113887,21 @@ func (r *InterconnectLocationsService) List(project string) *InterconnectLocatio // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. 
You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -111047,7 +113916,8 @@ func (r *InterconnectLocationsService) List(project string) *InterconnectLocatio // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *InterconnectLocationsListCall) Filter(filter string) *InterconnectLocationsListCall { c.urlParams_.Set("filter", filter) return c @@ -111203,7 +114073,7 @@ func (c *InterconnectLocationsListCall) Do(opts ...googleapi.CallOption) (*Inter // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. 
Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -111460,22 +114330,21 @@ func (r *InterconnectRemoteLocationsService) List(project string) *InterconnectR // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. 
For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -111490,7 +114359,8 @@ func (r *InterconnectRemoteLocationsService) List(project string) *InterconnectR // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *InterconnectRemoteLocationsListCall) Filter(filter string) *InterconnectRemoteLocationsListCall { c.urlParams_.Set("filter", filter) return c @@ -111646,7 +114516,7 @@ func (c *InterconnectRemoteLocationsListCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -112055,7 +114925,11 @@ type InterconnectsGetDiagnosticsCall struct { } // GetDiagnostics: Returns the interconnectDiagnostics for the specified -// Interconnect. +// Interconnect. In the event of a global outage, do not use this API to +// make decisions about where to redirect your network traffic. Unlike a +// VLAN attachment, which is regional, a Cloud Interconnect connection +// is a global resource. 
A global outage can prevent this API from +// functioning properly. // // - interconnect: Name of the interconnect resource to query. // - project: Project ID for this request. @@ -112167,7 +115041,7 @@ func (c *InterconnectsGetDiagnosticsCall) Do(opts ...googleapi.CallOption) (*Int } return ret, nil // { - // "description": "Returns the interconnectDiagnostics for the specified Interconnect.", + // "description": "Returns the interconnectDiagnostics for the specified Interconnect. In the event of a global outage, do not use this API to make decisions about where to redirect your network traffic. Unlike a VLAN attachment, which is regional, a Cloud Interconnect connection is a global resource. A global outage can prevent this API from functioning properly.", // "flatPath": "projects/{project}/global/interconnects/{interconnect}/getDiagnostics", // "httpMethod": "GET", // "id": "compute.interconnects.getDiagnostics", @@ -112204,6 +115078,168 @@ func (c *InterconnectsGetDiagnosticsCall) Do(opts ...googleapi.CallOption) (*Int } +// method id "compute.interconnects.getMacsecConfig": + +type InterconnectsGetMacsecConfigCall struct { + s *Service + project string + interconnect string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetMacsecConfig: Returns the interconnectMacsecConfig for the +// specified Interconnect. +// +// - interconnect: Name of the interconnect resource to query. +// - project: Project ID for this request. +func (r *InterconnectsService) GetMacsecConfig(project string, interconnect string) *InterconnectsGetMacsecConfigCall { + c := &InterconnectsGetMacsecConfigCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.interconnect = interconnect + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InterconnectsGetMacsecConfigCall) Fields(s ...googleapi.Field) *InterconnectsGetMacsecConfigCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InterconnectsGetMacsecConfigCall) IfNoneMatch(entityTag string) *InterconnectsGetMacsecConfigCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InterconnectsGetMacsecConfigCall) Context(ctx context.Context) *InterconnectsGetMacsecConfigCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *InterconnectsGetMacsecConfigCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InterconnectsGetMacsecConfigCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnects/{interconnect}/getMacsecConfig") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "interconnect": c.interconnect, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.interconnects.getMacsecConfig" call. +// Exactly one of *InterconnectsGetMacsecConfigResponse or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *InterconnectsGetMacsecConfigResponse.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *InterconnectsGetMacsecConfigCall) Do(opts ...googleapi.CallOption) (*InterconnectsGetMacsecConfigResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &InterconnectsGetMacsecConfigResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the interconnectMacsecConfig for the specified Interconnect.", + // "flatPath": "projects/{project}/global/interconnects/{interconnect}/getMacsecConfig", + // "httpMethod": "GET", + // "id": "compute.interconnects.getMacsecConfig", + // "parameterOrder": [ + // "project", + // "interconnect" + // ], + // "parameters": { + // "interconnect": { + // "description": "Name of the interconnect resource to query.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/interconnects/{interconnect}/getMacsecConfig", + // "response": { + // "$ref": "InterconnectsGetMacsecConfigResponse" + // }, + // "scopes": [ + // 
"https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + // method id "compute.interconnects.insert": type InterconnectsInsertCall struct { @@ -112394,22 +115430,21 @@ func (r *InterconnectsService) List(project string) *InterconnectsListCall { // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -112424,7 +115459,8 @@ func (r *InterconnectsService) List(project string) *InterconnectsListCall { // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *InterconnectsListCall) Filter(filter string) *InterconnectsListCall { c.urlParams_.Set("filter", filter) return c @@ -112580,7 +115616,7 @@ func (c *InterconnectsListCall) Do(opts ...googleapi.CallOption) (*InterconnectL // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. 
Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -114011,22 +117047,21 @@ func (r *LicensesService) List(project string) *LicensesListCall { // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. 
However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -114041,7 +117076,8 @@ func (r *LicensesService) List(project string) *LicensesListCall { // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *LicensesListCall) Filter(filter string) *LicensesListCall { c.urlParams_.Set("filter", filter) return c @@ -114197,7 +117233,7 @@ func (c *LicensesListCall) Do(opts ...googleapi.CallOption) (*LicensesListRespon // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. 
The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -115289,22 +118325,21 @@ func (r *MachineImagesService) List(project string) *MachineImagesListCall { // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. 
To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -115319,7 +118354,8 @@ func (r *MachineImagesService) List(project string) *MachineImagesListCall { // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *MachineImagesListCall) Filter(filter string) *MachineImagesListCall { c.urlParams_.Set("filter", filter) return c @@ -115475,7 +118511,7 @@ func (c *MachineImagesListCall) Do(opts ...googleapi.CallOption) (*MachineImageL // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -115881,22 +118917,21 @@ func (r *MachineTypesService) AggregatedList(project string) *MachineTypesAggreg // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. 
The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -115911,7 +118946,8 @@ func (r *MachineTypesService) AggregatedList(project string) *MachineTypesAggreg // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *MachineTypesAggregatedListCall) Filter(filter string) *MachineTypesAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -115972,6 +119008,13 @@ func (c *MachineTypesAggregatedListCall) ReturnPartialSuccess(returnPartialSucce return c } +// ServiceProjectNumber sets the optional parameter +// "serviceProjectNumber": +func (c *MachineTypesAggregatedListCall) ServiceProjectNumber(serviceProjectNumber int64) *MachineTypesAggregatedListCall { + c.urlParams_.Set("serviceProjectNumber", fmt.Sprint(serviceProjectNumber)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -116080,7 +119123,7 @@ func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Mach // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -116118,6 +119161,11 @@ func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Mach // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" + // }, + // "serviceProjectNumber": { + // "format": "int64", + // "location": "query", + // "type": "string" // } // }, // "path": "projects/{project}/aggregated/machineTypes", @@ -116354,22 +119402,21 @@ func (r *MachineTypesService) List(project string, zone string) *MachineTypesLis // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. 
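// Illustrative sketch (not generated code): the new ServiceProjectNumber
// optional parameter on the aggregated-list call above. The discovery
// document gives it no description in this revision, so 1234567890 below is
// purely a placeholder value; svc is a *Service built as in the earlier
// Filter sketch.
func exampleAggregatedMachineTypes(ctx context.Context, svc *Service) error {
	_, err := svc.MachineTypes.AggregatedList("my-project").
		ServiceProjectNumber(1234567890). // placeholder; semantics undocumented here
		ReturnPartialSuccess(true).       // tolerate failures in individual scopes
		Context(ctx).
		Do()
	return err
}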
To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -116384,7 +119431,8 @@ func (r *MachineTypesService) List(project string, zone string) *MachineTypesLis // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *MachineTypesListCall) Filter(filter string) *MachineTypesListCall { c.urlParams_.Set("filter", filter) return c @@ -116542,7 +119590,7 @@ func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeLis // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. 
Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -116643,22 +119691,21 @@ func (r *NetworkAttachmentsService) AggregatedList(project string) *NetworkAttac // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. 
For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -116673,7 +119720,8 @@ func (r *NetworkAttachmentsService) AggregatedList(project string) *NetworkAttac // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *NetworkAttachmentsAggregatedListCall) Filter(filter string) *NetworkAttachmentsAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -116734,6 +119782,13 @@ func (c *NetworkAttachmentsAggregatedListCall) ReturnPartialSuccess(returnPartia return c } +// ServiceProjectNumber sets the optional parameter +// "serviceProjectNumber": +func (c *NetworkAttachmentsAggregatedListCall) ServiceProjectNumber(serviceProjectNumber int64) *NetworkAttachmentsAggregatedListCall { + c.urlParams_.Set("serviceProjectNumber", fmt.Sprint(serviceProjectNumber)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -116842,7 +119897,7 @@ func (c *NetworkAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. 
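To show what the newly added `ServiceProjectNumber` setter above might look like from caller code, here is a hedged sketch reusing `svc` and `ctx` from the earlier example. The project name and the service project number are placeholders, and since this revision ships the parameter without a description, its exact server-side semantics are not asserted here:

```go
// Sketch only: aggregated listing of network attachments, forwarding the new
// serviceProjectNumber query parameter.
call := svc.NetworkAttachments.AggregatedList("my-host-project").
	ServiceProjectNumber(123456789012). // placeholder value
	ReturnPartialSuccess(true)

resp, err := call.Context(ctx).Do()
if err != nil {
	log.Fatalf("networkAttachments.aggregatedList: %v", err)
}
// Items is keyed by scope (e.g. "regions/us-central1"), following the
// generator's usual aggregated-list shape.
for scope, scoped := range resp.Items {
	fmt.Printf("%s: %d attachment(s)\n", scope, len(scoped.NetworkAttachments))
}
```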
For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -116880,6 +119935,11 @@ func (c *NetworkAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOption) // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" + // }, + // "serviceProjectNumber": { + // "format": "int64", + // "location": "query", + // "type": "string" // } // }, // "path": "projects/{project}/aggregated/networkAttachments", @@ -117661,22 +120721,21 @@ func (r *NetworkAttachmentsService) List(project string, region string) *Network // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -117691,7 +120750,8 @@ func (r *NetworkAttachmentsService) List(project string, region string) *Network // regular expression using Google RE2 library syntax. The literal value // must match the entire field. 
For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *NetworkAttachmentsListCall) Filter(filter string) *NetworkAttachmentsListCall { c.urlParams_.Set("filter", filter) return c @@ -117849,7 +120909,7 @@ func (c *NetworkAttachmentsListCall) Do(opts ...googleapi.CallOption) (*NetworkA // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. 
The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -117925,6 +120985,197 @@ func (c *NetworkAttachmentsListCall) Pages(ctx context.Context, f func(*NetworkA } } +// method id "compute.networkAttachments.patch": + +type NetworkAttachmentsPatchCall struct { + s *Service + project string + region string + networkAttachment string + networkattachment *NetworkAttachment + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Patches the specified NetworkAttachment resource with the data +// included in the request. This method supports PATCH semantics and +// uses JSON merge patch format and processing rules. +// +// - networkAttachment: Name of the NetworkAttachment resource to patch. +// - project: Project ID for this request. +// - region: Name of the region for this request. +func (r *NetworkAttachmentsService) Patch(project string, region string, networkAttachment string, networkattachment *NetworkAttachment) *NetworkAttachmentsPatchCall { + c := &NetworkAttachmentsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.networkAttachment = networkAttachment + c.networkattachment = networkattachment + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). 
end_interface: +// MixerMutationRequestBuilder +func (c *NetworkAttachmentsPatchCall) RequestId(requestId string) *NetworkAttachmentsPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NetworkAttachmentsPatchCall) Fields(s ...googleapi.Field) *NetworkAttachmentsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NetworkAttachmentsPatchCall) Context(ctx context.Context) *NetworkAttachmentsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *NetworkAttachmentsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NetworkAttachmentsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkattachment) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "networkAttachment": c.networkAttachment, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.networkAttachments.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NetworkAttachmentsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Patches the specified NetworkAttachment resource with the data included in the request. 
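A hedged usage sketch for the new `compute.networkAttachments.patch` call defined above, reusing `svc` and `ctx` from the earlier example. The resource names are placeholders, and `github.com/google/uuid` is an assumed helper for producing the request ID (any valid non-zero UUID string works):

```go
// Sketch only: patch the description of an existing network attachment.
// JSON merge-patch semantics mean only the populated fields are changed.
patch := &compute.NetworkAttachment{
	Description: "updated by example sketch",
}

op, err := svc.NetworkAttachments.
	Patch("my-project", "us-central1", "my-attachment", patch).
	RequestId(uuid.NewString()). // idempotency token; reuse it verbatim on retries
	Context(ctx).
	Do()
if err != nil {
	log.Fatalf("networkAttachments.patch: %v", err)
}
fmt.Printf("operation %s: %s\n", op.Name, op.Status)
```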
This method supports PATCH semantics and uses JSON merge patch format and processing rules.", + // "flatPath": "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}", + // "httpMethod": "PATCH", + // "id": "compute.networkAttachments.patch", + // "parameterOrder": [ + // "project", + // "region", + // "networkAttachment" + // ], + // "parameters": { + // "networkAttachment": { + // "description": "Name of the NetworkAttachment resource to patch.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000). end_interface: MixerMutationRequestBuilder", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}", + // "request": { + // "$ref": "NetworkAttachment" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.networkAttachments.setIamPolicy": type NetworkAttachmentsSetIamPolicyCall struct { @@ -118287,22 +121538,21 @@ func (r *NetworkEdgeSecurityServicesService) AggregatedList(project string) *Net // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. 
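The `Do` contract described above (non-2xx responses surface as `*googleapi.Error`, with `googleapi.IsNotModified` available for 304 responses) can be handled roughly as follows. This is a fragment under the same assumptions as the earlier sketches, with `err` taken from any generated `Do` call:

```go
// Sketch only: generic handling of an error returned by a generated Do call.
var gerr *googleapi.Error
if err != nil {
	switch {
	case googleapi.IsNotModified(err):
		log.Println("resource not modified (HTTP 304)")
	case errors.As(err, &gerr):
		// Status code, message and response headers are preserved on the error.
		log.Printf("request failed: HTTP %d: %s", gerr.Code, gerr.Message)
	default:
		log.Printf("request failed: %v", err)
	}
}
```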
For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -118317,7 +121567,8 @@ func (r *NetworkEdgeSecurityServicesService) AggregatedList(project string) *Net // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *NetworkEdgeSecurityServicesAggregatedListCall) Filter(filter string) *NetworkEdgeSecurityServicesAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -118378,6 +121629,13 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) ReturnPartialSuccess(ret return c } +// ServiceProjectNumber sets the optional parameter +// "serviceProjectNumber": +func (c *NetworkEdgeSecurityServicesAggregatedListCall) ServiceProjectNumber(serviceProjectNumber int64) *NetworkEdgeSecurityServicesAggregatedListCall { + c.urlParams_.Set("serviceProjectNumber", fmt.Sprint(serviceProjectNumber)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -118488,7 +121746,7 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) Do(opts ...googleapi.Cal // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. 
The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. 
The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -118526,6 +121784,11 @@ func (c *NetworkEdgeSecurityServicesAggregatedListCall) Do(opts ...googleapi.Cal // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" + // }, + // "serviceProjectNumber": { + // "format": "int64", + // "location": "query", + // "type": "string" // } // }, // "path": "projects/{project}/aggregated/networkEdgeSecurityServices", @@ -119342,22 +122605,21 @@ func (r *NetworkEndpointGroupsService) AggregatedList(project string) *NetworkEn // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -119372,7 +122634,8 @@ func (r *NetworkEndpointGroupsService) AggregatedList(project string) *NetworkEn // regular expression using Google RE2 library syntax. The literal value // must match the entire field. 
For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *NetworkEndpointGroupsAggregatedListCall) Filter(filter string) *NetworkEndpointGroupsAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -119433,6 +122696,13 @@ func (c *NetworkEndpointGroupsAggregatedListCall) ReturnPartialSuccess(returnPar return c } +// ServiceProjectNumber sets the optional parameter +// "serviceProjectNumber": +func (c *NetworkEndpointGroupsAggregatedListCall) ServiceProjectNumber(serviceProjectNumber int64) *NetworkEndpointGroupsAggregatedListCall { + c.urlParams_.Set("serviceProjectNumber", fmt.Sprint(serviceProjectNumber)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -119542,7 +122812,7 @@ func (c *NetworkEndpointGroupsAggregatedListCall) Do(opts ...googleapi.CallOptio // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. 
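Combining the `Filter`, `ReturnPartialSuccess` and `ServiceProjectNumber` setters above with page iteration; a sketch reusing `svc` and `ctx` from the first example, with placeholder project and filter values:

```go
// Sketch only: walk every page of the aggregated network endpoint group list.
err := svc.NetworkEndpointGroups.AggregatedList("my-project").
	Filter(`networkEndpointType = GCE_VM_IP_PORT`).
	ReturnPartialSuccess(true).
	ServiceProjectNumber(123456789012). // placeholder value
	Pages(ctx, func(page *compute.NetworkEndpointGroupAggregatedList) error {
		for scope, scoped := range page.Items {
			for _, neg := range scoped.NetworkEndpointGroups {
				fmt.Printf("%s: %s (%d endpoints)\n", scope, neg.Name, neg.Size)
			}
		}
		return nil
	})
if err != nil {
	log.Fatalf("networkEndpointGroups.aggregatedList: %v", err)
}
```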
Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -119580,6 +122850,11 @@ func (c *NetworkEndpointGroupsAggregatedListCall) Do(opts ...googleapi.CallOptio // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" + // }, + // "serviceProjectNumber": { + // "format": "int64", + // "location": "query", + // "type": "string" // } // }, // "path": "projects/{project}/aggregated/networkEndpointGroups", @@ -120554,22 +123829,21 @@ func (r *NetworkEndpointGroupsService) List(project string, zone string) *Networ // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. 
The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -120584,7 +123858,8 @@ func (r *NetworkEndpointGroupsService) List(project string, zone string) *Networ // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *NetworkEndpointGroupsListCall) Filter(filter string) *NetworkEndpointGroupsListCall { c.urlParams_.Set("filter", filter) return c @@ -120742,7 +124017,7 @@ func (c *NetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) (*Netwo // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. 
You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -120852,22 +124127,21 @@ func (r *NetworkEndpointGroupsService) ListNetworkEndpoints(project string, zone // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -120882,7 +124156,8 @@ func (r *NetworkEndpointGroupsService) ListNetworkEndpoints(project string, zone // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Filter(filter string) *NetworkEndpointGroupsListNetworkEndpointsCall { c.urlParams_.Set("filter", filter) return c @@ -121036,7 +124311,7 @@ func (c *NetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googleapi.Cal // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. 
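For the `ListNetworkEndpoints` call whose `Filter` setter appears above, a hedged sketch reusing `svc` and `ctx` from the first example. The POST body type and the health-status option reflect the usual shape of this generated method and are assumptions here, as are the project, zone and NEG names:

```go
// Sketch only: list the endpoints inside one zonal NEG, including health.
req := &compute.NetworkEndpointGroupsListEndpointsRequest{
	HealthStatus: "SHOW", // "SKIP" omits health checking results
}
err := svc.NetworkEndpointGroups.
	ListNetworkEndpoints("my-project", "us-central1-a", "my-neg", req).
	Pages(ctx, func(page *compute.NetworkEndpointGroupsListNetworkEndpoints) error {
		for _, item := range page.Items {
			ep := item.NetworkEndpoint
			fmt.Printf("%s:%d (instance %s)\n", ep.IpAddress, ep.Port, ep.Instance)
		}
		return nil
	})
if err != nil {
	log.Fatalf("networkEndpointGroups.listNetworkEndpoints: %v", err)
}
```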
Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -122902,22 +126177,21 @@ func (r *NetworkFirewallPoliciesService) List(project string) *NetworkFirewallPo // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. 
However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -122932,7 +126206,8 @@ func (r *NetworkFirewallPoliciesService) List(project string) *NetworkFirewallPo // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *NetworkFirewallPoliciesListCall) Filter(filter string) *NetworkFirewallPoliciesListCall { c.urlParams_.Set("filter", filter) return c @@ -123088,7 +126363,7 @@ func (c *NetworkFirewallPoliciesListCall) Do(opts ...googleapi.CallOption) (*Fir // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. 
If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -125048,22 +128323,21 @@ func (r *NetworksService) List(project string) *NetworksListCall { // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. 
You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -125078,7 +128352,8 @@ func (r *NetworksService) List(project string) *NetworksListCall { // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *NetworksListCall) Filter(filter string) *NetworksListCall { c.urlParams_.Set("filter", filter) return c @@ -125234,7 +128509,7 @@ func (c *NetworksListCall) Do(opts ...googleapi.CallOption) (*NetworkList, error // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -125343,22 +128618,21 @@ func (c *NetworksListPeeringRoutesCall) Direction(direction string) *NetworksLis // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. 
The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -125373,7 +128647,8 @@ func (c *NetworksListPeeringRoutesCall) Direction(direction string) *NetworksLis // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *NetworksListPeeringRoutesCall) Filter(filter string) *NetworksListPeeringRoutesCall { c.urlParams_.Set("filter", filter) return c @@ -125559,7 +128834,7 @@ func (c *NetworksListPeeringRoutesCall) Do(opts ...googleapi.CallOption) (*Excha // "type": "string" // }, // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. 
You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -126559,22 +129834,21 @@ func (r *NodeGroupsService) AggregatedList(project string) *NodeGroupsAggregated // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -126589,7 +129863,8 @@ func (r *NodeGroupsService) AggregatedList(project string) *NodeGroupsAggregated // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. 
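// ---------------------------------------------------------------------------
// Illustrative caller-side sketch (not part of the generated diff above): how
// the two filter forms documented in these hunks might be passed to a list
// call. Assumes the vendored google.golang.org/api/compute/v1 package and a
// placeholder project name; both are assumptions. As the updated comments
// state, the AIP-160 and eq/ne regex styles cannot be mixed in one request.
// ---------------------------------------------------------------------------
package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func listNetworksFiltered(ctx context.Context, project string) error {
	svc, err := compute.NewService(ctx)
	if err != nil {
		return err
	}

	// AIP-160 style: field, operator, value. Kept here only for reference;
	// use exactly one style per request.
	aipFilter := `name != example-network`
	_ = aipFilter

	// Regex style with `ne`: the RE2 literal must match the entire field.
	regexFilter := `name ne .*-test`

	networks, err := svc.Networks.List(project).Filter(regexFilter).Context(ctx).Do()
	if err != nil {
		return err
	}
	for _, n := range networks.Items {
		fmt.Println(n.Name)
	}
	return nil
}

func main() {
	if err := listNetworksFiltered(context.Background(), "my-project"); err != nil {
		log.Fatal(err)
	}
}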
func (c *NodeGroupsAggregatedListCall) Filter(filter string) *NodeGroupsAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -126650,6 +129925,13 @@ func (c *NodeGroupsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess return c } +// ServiceProjectNumber sets the optional parameter +// "serviceProjectNumber": +func (c *NodeGroupsAggregatedListCall) ServiceProjectNumber(serviceProjectNumber int64) *NodeGroupsAggregatedListCall { + c.urlParams_.Set("serviceProjectNumber", fmt.Sprint(serviceProjectNumber)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -126758,7 +130040,7 @@ func (c *NodeGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeGr // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. 
If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -126796,6 +130078,11 @@ func (c *NodeGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeGr // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" + // }, + // "serviceProjectNumber": { + // "format": "int64", + // "location": "query", + // "type": "string" // } // }, // "path": "projects/{project}/aggregated/nodeGroups", @@ -127775,22 +131062,21 @@ func (r *NodeGroupsService) List(project string, zone string) *NodeGroupsListCal // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. 
The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -127805,7 +131091,8 @@ func (r *NodeGroupsService) List(project string, zone string) *NodeGroupsListCal // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *NodeGroupsListCall) Filter(filter string) *NodeGroupsListCall { c.urlParams_.Set("filter", filter) return c @@ -127963,7 +131250,7 @@ func (c *NodeGroupsListCall) Do(opts ...googleapi.CallOption) (*NodeGroupList, e // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -128069,22 +131356,21 @@ func (r *NodeGroupsService) ListNodes(project string, zone string, nodeGroup str // filters resources listed in the response. 
Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -128099,7 +131385,8 @@ func (r *NodeGroupsService) ListNodes(project string, zone string, nodeGroup str // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *NodeGroupsListNodesCall) Filter(filter string) *NodeGroupsListNodesCall { c.urlParams_.Set("filter", filter) return c @@ -128246,7 +131533,7 @@ func (c *NodeGroupsListNodesCall) Do(opts ...googleapi.CallOption) (*NodeGroupsL // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -129256,22 +132543,21 @@ func (r *NodeTemplatesService) AggregatedList(project string) *NodeTemplatesAggr // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. 
For example: ``` @@ -129286,7 +132572,8 @@ func (r *NodeTemplatesService) AggregatedList(project string) *NodeTemplatesAggr // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *NodeTemplatesAggregatedListCall) Filter(filter string) *NodeTemplatesAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -129347,6 +132634,13 @@ func (c *NodeTemplatesAggregatedListCall) ReturnPartialSuccess(returnPartialSucc return c } +// ServiceProjectNumber sets the optional parameter +// "serviceProjectNumber": +func (c *NodeTemplatesAggregatedListCall) ServiceProjectNumber(serviceProjectNumber int64) *NodeTemplatesAggregatedListCall { + c.urlParams_.Set("serviceProjectNumber", fmt.Sprint(serviceProjectNumber)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -129455,7 +132749,7 @@ func (c *NodeTemplatesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Nod // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -129493,6 +132787,11 @@ func (c *NodeTemplatesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Nod // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" + // }, + // "serviceProjectNumber": { + // "format": "int64", + // "location": "query", + // "type": "string" // } // }, // "path": "projects/{project}/aggregated/nodeTemplates", @@ -130270,22 +133569,21 @@ func (r *NodeTemplatesService) List(project string, region string) *NodeTemplate // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. 
These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -130300,7 +133598,8 @@ func (r *NodeTemplatesService) List(project string, region string) *NodeTemplate // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *NodeTemplatesListCall) Filter(filter string) *NodeTemplatesListCall { c.urlParams_.Set("filter", filter) return c @@ -130458,7 +133757,7 @@ func (c *NodeTemplatesListCall) Do(opts ...googleapi.CallOption) (*NodeTemplateL // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. 
For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -130895,22 +134194,21 @@ func (r *NodeTypesService) AggregatedList(project string) *NodeTypesAggregatedLi // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -130925,7 +134223,8 @@ func (r *NodeTypesService) AggregatedList(project string) *NodeTypesAggregatedLi // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. 
func (c *NodeTypesAggregatedListCall) Filter(filter string) *NodeTypesAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -130986,6 +134285,13 @@ func (c *NodeTypesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess return c } +// ServiceProjectNumber sets the optional parameter +// "serviceProjectNumber": +func (c *NodeTypesAggregatedListCall) ServiceProjectNumber(serviceProjectNumber int64) *NodeTypesAggregatedListCall { + c.urlParams_.Set("serviceProjectNumber", fmt.Sprint(serviceProjectNumber)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -131094,7 +134400,7 @@ func (c *NodeTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeTyp // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. 
If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -131132,6 +134438,11 @@ func (c *NodeTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*NodeTyp // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" + // }, + // "serviceProjectNumber": { + // "format": "int64", + // "location": "query", + // "type": "string" // } // }, // "path": "projects/{project}/aggregated/nodeTypes", @@ -131368,22 +134679,21 @@ func (r *NodeTypesService) List(project string, zone string) *NodeTypesListCall // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. 
The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -131398,7 +134708,8 @@ func (r *NodeTypesService) List(project string, zone string) *NodeTypesListCall // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *NodeTypesListCall) Filter(filter string) *NodeTypesListCall { c.urlParams_.Set("filter", filter) return c @@ -131556,7 +134867,7 @@ func (c *NodeTypesListCall) Do(opts ...googleapi.CallOption) (*NodeTypeList, err // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -131656,22 +134967,21 @@ func (r *PacketMirroringsService) AggregatedList(project string) *PacketMirrorin // filters resources listed in the response. 
Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -131686,7 +134996,8 @@ func (r *PacketMirroringsService) AggregatedList(project string) *PacketMirrorin // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *PacketMirroringsAggregatedListCall) Filter(filter string) *PacketMirroringsAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -131747,6 +135058,13 @@ func (c *PacketMirroringsAggregatedListCall) ReturnPartialSuccess(returnPartialS return c } +// ServiceProjectNumber sets the optional parameter +// "serviceProjectNumber": +func (c *PacketMirroringsAggregatedListCall) ServiceProjectNumber(serviceProjectNumber int64) *PacketMirroringsAggregatedListCall { + c.urlParams_.Set("serviceProjectNumber", fmt.Sprint(serviceProjectNumber)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
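For orientation, a minimal sketch of how client code might exercise the regenerated `Filter` documentation and the new `ServiceProjectNumber` parameter on an aggregated-list call, assuming the vendored `google.golang.org/api/compute/v1` package as updated by this patch; the project ID, filter string, and service project number below are placeholders, not values from the patch:

```go
package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()

	// NewService picks up Application Default Credentials; explicit
	// option.ClientOption values could be passed here instead.
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// The filter uses the AIP-160 style described in the regenerated docs;
	// mixing it with the legacy `eq`/`ne` regular-expression form in one
	// request is not allowed.
	resp, err := svc.PacketMirrorings.AggregatedList("my-project").
		Filter("name != example-mirroring").
		ReturnPartialSuccess(true).
		// ServiceProjectNumber is the query parameter added by this
		// regeneration; the value is a placeholder.
		ServiceProjectNumber(123456789012).
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}

	// Aggregated lists are keyed by scope (region or zone).
	for scope := range resp.Items {
		fmt.Println(scope)
	}
}
```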
@@ -131855,7 +135173,7 @@ func (c *PacketMirroringsAggregatedListCall) Do(opts ...googleapi.CallOption) (* // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -131893,6 +135211,11 @@ func (c *PacketMirroringsAggregatedListCall) Do(opts ...googleapi.CallOption) (* // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" + // }, + // "serviceProjectNumber": { + // "format": "int64", + // "location": "query", + // "type": "string" // } // }, // "path": "projects/{project}/aggregated/packetMirrorings", @@ -132484,22 +135807,21 @@ func (r *PacketMirroringsService) List(project string, region string) *PacketMir // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. 
The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -132514,7 +135836,8 @@ func (r *PacketMirroringsService) List(project string, region string) *PacketMir // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *PacketMirroringsListCall) Filter(filter string) *PacketMirroringsListCall { c.urlParams_.Set("filter", filter) return c @@ -132672,7 +135995,7 @@ func (c *PacketMirroringsListCall) Do(opts ...googleapi.CallOption) (*PacketMirr // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -134074,22 +137397,21 @@ func (r *ProjectsService) GetXpnResources(project string) *ProjectsGetXpnResourc // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. 
The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -134104,7 +137426,8 @@ func (r *ProjectsService) GetXpnResources(project string) *ProjectsGetXpnResourc // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *ProjectsGetXpnResourcesCall) Filter(filter string) *ProjectsGetXpnResourcesCall { c.urlParams_.Set("filter", filter) return c @@ -134260,7 +137583,7 @@ func (c *ProjectsGetXpnResourcesCall) Do(opts ...googleapi.CallOption) (*Project // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. 
You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -134354,22 +137677,21 @@ func (r *ProjectsService) ListXpnHosts(project string, projectslistxpnhostsreque // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -134384,7 +137706,8 @@ func (r *ProjectsService) ListXpnHosts(project string, projectslistxpnhostsreque // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *ProjectsListXpnHostsCall) Filter(filter string) *ProjectsListXpnHostsCall { c.urlParams_.Set("filter", filter) return c @@ -134532,7 +137855,7 @@ func (c *ProjectsListXpnHostsCall) Do(opts ...googleapi.CallOption) (*XpnHostLis // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. 
Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -135441,6 +138764,172 @@ func (c *ProjectsSetUsageExportBucketCall) Do(opts ...googleapi.CallOption) (*Op } +// method id "compute.publicAdvertisedPrefixes.announce": + +type PublicAdvertisedPrefixesAnnounceCall struct { + s *Service + project string + publicAdvertisedPrefix string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Announce: Announces the specified PublicAdvertisedPrefix +// +// - project: Project ID for this request. +// - publicAdvertisedPrefix: The name of the public advertised prefix. +// It should comply with RFC1035. +func (r *PublicAdvertisedPrefixesService) Announce(project string, publicAdvertisedPrefix string) *PublicAdvertisedPrefixesAnnounceCall { + c := &PublicAdvertisedPrefixesAnnounceCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.publicAdvertisedPrefix = publicAdvertisedPrefix + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *PublicAdvertisedPrefixesAnnounceCall) RequestId(requestId string) *PublicAdvertisedPrefixesAnnounceCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *PublicAdvertisedPrefixesAnnounceCall) Fields(s ...googleapi.Field) *PublicAdvertisedPrefixesAnnounceCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *PublicAdvertisedPrefixesAnnounceCall) Context(ctx context.Context) *PublicAdvertisedPrefixesAnnounceCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *PublicAdvertisedPrefixesAnnounceCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *PublicAdvertisedPrefixesAnnounceCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}/announce") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "publicAdvertisedPrefix": c.publicAdvertisedPrefix, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.publicAdvertisedPrefixes.announce" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *PublicAdvertisedPrefixesAnnounceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Announces the specified PublicAdvertisedPrefix", + // "flatPath": "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}/announce", + // "httpMethod": "POST", + // "id": "compute.publicAdvertisedPrefixes.announce", + // "parameterOrder": [ + // "project", + // "publicAdvertisedPrefix" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "publicAdvertisedPrefix": { + // "description": "The name of the public advertised prefix. It should comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}/announce", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.publicAdvertisedPrefixes.delete": type PublicAdvertisedPrefixesDeleteCall struct { @@ -135958,22 +139447,21 @@ func (r *PublicAdvertisedPrefixesService) List(project string) *PublicAdvertised // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. 
However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -135988,7 +139476,8 @@ func (r *PublicAdvertisedPrefixesService) List(project string) *PublicAdvertised // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *PublicAdvertisedPrefixesListCall) Filter(filter string) *PublicAdvertisedPrefixesListCall { c.urlParams_.Set("filter", filter) return c @@ -136144,7 +139633,7 @@ func (c *PublicAdvertisedPrefixesListCall) Do(opts ...googleapi.CallOption) (*Pu // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. 
If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -136392,6 +139881,172 @@ func (c *PublicAdvertisedPrefixesPatchCall) Do(opts ...googleapi.CallOption) (*O } +// method id "compute.publicAdvertisedPrefixes.withdraw": + +type PublicAdvertisedPrefixesWithdrawCall struct { + s *Service + project string + publicAdvertisedPrefix string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Withdraw: Withdraws the specified PublicAdvertisedPrefix +// +// - project: Project ID for this request. +// - publicAdvertisedPrefix: The name of the public advertised prefix. +// It should comply with RFC1035. +func (r *PublicAdvertisedPrefixesService) Withdraw(project string, publicAdvertisedPrefix string) *PublicAdvertisedPrefixesWithdrawCall { + c := &PublicAdvertisedPrefixesWithdrawCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.publicAdvertisedPrefix = publicAdvertisedPrefix + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. 
This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *PublicAdvertisedPrefixesWithdrawCall) RequestId(requestId string) *PublicAdvertisedPrefixesWithdrawCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *PublicAdvertisedPrefixesWithdrawCall) Fields(s ...googleapi.Field) *PublicAdvertisedPrefixesWithdrawCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *PublicAdvertisedPrefixesWithdrawCall) Context(ctx context.Context) *PublicAdvertisedPrefixesWithdrawCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *PublicAdvertisedPrefixesWithdrawCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *PublicAdvertisedPrefixesWithdrawCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}/withdraw") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "publicAdvertisedPrefix": c.publicAdvertisedPrefix, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.publicAdvertisedPrefixes.withdraw" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *PublicAdvertisedPrefixesWithdrawCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Withdraws the specified PublicAdvertisedPrefix", + // "flatPath": "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}/withdraw", + // "httpMethod": "POST", + // "id": "compute.publicAdvertisedPrefixes.withdraw", + // "parameterOrder": [ + // "project", + // "publicAdvertisedPrefix" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "publicAdvertisedPrefix": { + // "description": "The name of the public advertised prefix. It should comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/publicAdvertisedPrefixes/{publicAdvertisedPrefix}/withdraw", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.publicDelegatedPrefixes.aggregatedList": type PublicDelegatedPrefixesAggregatedListCall struct { @@ -136417,22 +140072,21 @@ func (r *PublicDelegatedPrefixesService) AggregatedList(project string) *PublicD // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. 
The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -136447,7 +140101,8 @@ func (r *PublicDelegatedPrefixesService) AggregatedList(project string) *PublicD // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *PublicDelegatedPrefixesAggregatedListCall) Filter(filter string) *PublicDelegatedPrefixesAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -136508,6 +140163,13 @@ func (c *PublicDelegatedPrefixesAggregatedListCall) ReturnPartialSuccess(returnP return c } +// ServiceProjectNumber sets the optional parameter +// "serviceProjectNumber": +func (c *PublicDelegatedPrefixesAggregatedListCall) ServiceProjectNumber(serviceProjectNumber int64) *PublicDelegatedPrefixesAggregatedListCall { + c.urlParams_.Set("serviceProjectNumber", fmt.Sprint(serviceProjectNumber)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -136617,7 +140279,7 @@ func (c *PublicDelegatedPrefixesAggregatedListCall) Do(opts ...googleapi.CallOpt // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -136655,6 +140317,11 @@ func (c *PublicDelegatedPrefixesAggregatedListCall) Do(opts ...googleapi.CallOpt // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" + // }, + // "serviceProjectNumber": { + // "format": "int64", + // "location": "query", + // "type": "string" // } // }, // "path": "projects/{project}/aggregated/publicDelegatedPrefixes", @@ -136691,6 +140358,185 @@ func (c *PublicDelegatedPrefixesAggregatedListCall) Pages(ctx context.Context, f } } +// method id "compute.publicDelegatedPrefixes.announce": + +type PublicDelegatedPrefixesAnnounceCall struct { + s *Service + project string + region string + publicDelegatedPrefix string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Announce: Announces the specified PublicDelegatedPrefix in the given +// region. +// +// - project: Project ID for this request. +// - publicDelegatedPrefix: The name of the public delegated prefix. It +// should comply with RFC1035. +// - region: The name of the region where the public delegated prefix is +// located. It should comply with RFC1035. +func (r *PublicDelegatedPrefixesService) Announce(project string, region string, publicDelegatedPrefix string) *PublicDelegatedPrefixesAnnounceCall { + c := &PublicDelegatedPrefixesAnnounceCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.publicDelegatedPrefix = publicDelegatedPrefix + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *PublicDelegatedPrefixesAnnounceCall) RequestId(requestId string) *PublicDelegatedPrefixesAnnounceCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *PublicDelegatedPrefixesAnnounceCall) Fields(s ...googleapi.Field) *PublicDelegatedPrefixesAnnounceCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *PublicDelegatedPrefixesAnnounceCall) Context(ctx context.Context) *PublicDelegatedPrefixesAnnounceCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *PublicDelegatedPrefixesAnnounceCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *PublicDelegatedPrefixesAnnounceCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}/announce") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "publicDelegatedPrefix": c.publicDelegatedPrefix, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.publicDelegatedPrefixes.announce" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *PublicDelegatedPrefixesAnnounceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Announces the specified PublicDelegatedPrefix in the given region.", + // "flatPath": "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}/announce", + // "httpMethod": "POST", + // "id": "compute.publicDelegatedPrefixes.announce", + // "parameterOrder": [ + // "project", + // "region", + // "publicDelegatedPrefix" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "publicDelegatedPrefix": { + // "description": "The name of the public delegated prefix. It should comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region where the public delegated prefix is located. It should comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}/announce", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.publicDelegatedPrefixes.delete": type PublicDelegatedPrefixesDeleteCall struct { @@ -137251,22 +141097,21 @@ func (r *PublicDelegatedPrefixesService) List(project string, region string) *Pu // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. 
These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -137281,7 +141126,8 @@ func (r *PublicDelegatedPrefixesService) List(project string, region string) *Pu // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *PublicDelegatedPrefixesListCall) Filter(filter string) *PublicDelegatedPrefixesListCall { c.urlParams_.Set("filter", filter) return c @@ -137439,7 +141285,7 @@ func (c *PublicDelegatedPrefixesListCall) Do(opts ...googleapi.CallOption) (*Pub // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. 
The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. 
The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -137706,6 +141552,185 @@ func (c *PublicDelegatedPrefixesPatchCall) Do(opts ...googleapi.CallOption) (*Op } +// method id "compute.publicDelegatedPrefixes.withdraw": + +type PublicDelegatedPrefixesWithdrawCall struct { + s *Service + project string + region string + publicDelegatedPrefix string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Withdraw: Withdraws the specified PublicDelegatedPrefix in the given +// region. +// +// - project: Project ID for this request. +// - publicDelegatedPrefix: The name of the public delegated prefix. It +// should comply with RFC1035. +// - region: The name of the region where the public delegated prefix is +// located. It should comply with RFC1035. +func (r *PublicDelegatedPrefixesService) Withdraw(project string, region string, publicDelegatedPrefix string) *PublicDelegatedPrefixesWithdrawCall { + c := &PublicDelegatedPrefixesWithdrawCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.publicDelegatedPrefix = publicDelegatedPrefix + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *PublicDelegatedPrefixesWithdrawCall) RequestId(requestId string) *PublicDelegatedPrefixesWithdrawCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *PublicDelegatedPrefixesWithdrawCall) Fields(s ...googleapi.Field) *PublicDelegatedPrefixesWithdrawCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *PublicDelegatedPrefixesWithdrawCall) Context(ctx context.Context) *PublicDelegatedPrefixesWithdrawCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *PublicDelegatedPrefixesWithdrawCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *PublicDelegatedPrefixesWithdrawCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}/withdraw") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "publicDelegatedPrefix": c.publicDelegatedPrefix, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.publicDelegatedPrefixes.withdraw" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *PublicDelegatedPrefixesWithdrawCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Withdraws the specified PublicDelegatedPrefix in the given region.", + // "flatPath": "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}/withdraw", + // "httpMethod": "POST", + // "id": "compute.publicDelegatedPrefixes.withdraw", + // "parameterOrder": [ + // "project", + // "region", + // "publicDelegatedPrefix" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "publicDelegatedPrefix": { + // "description": "The name of the public delegated prefix. It should comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region where the public delegated prefix is located. 
It should comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/publicDelegatedPrefixes/{publicDelegatedPrefix}/withdraw", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.regionAutoscalers.delete": type RegionAutoscalersDeleteCall struct { @@ -138261,22 +142286,21 @@ func (r *RegionAutoscalersService) List(project string, region string) *RegionAu // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. 
For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -138291,7 +142315,8 @@ func (r *RegionAutoscalersService) List(project string, region string) *RegionAu // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *RegionAutoscalersListCall) Filter(filter string) *RegionAutoscalersListCall { c.urlParams_.Set("filter", filter) return c @@ -138449,7 +142474,7 @@ func (c *RegionAutoscalersListCall) Do(opts ...googleapi.CallOption) (*RegionAut // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. 
These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -139817,22 +143842,21 @@ func (r *RegionBackendServicesService) List(project string, region string) *Regi // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. 
-// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -139847,7 +143871,8 @@ func (r *RegionBackendServicesService) List(project string, region string) *Regi // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *RegionBackendServicesListCall) Filter(filter string) *RegionBackendServicesListCall { c.urlParams_.Set("filter", filter) return c @@ -140005,7 +144030,7 @@ func (c *RegionBackendServicesListCall) Do(opts ...googleapi.CallOption) (*Backe // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. 
You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -140081,6 +144106,298 @@ func (c *RegionBackendServicesListCall) Pages(ctx context.Context, f func(*Backe } } +// method id "compute.regionBackendServices.listUsable": + +type RegionBackendServicesListUsableCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// ListUsable: Retrieves an aggregated list of all usable backend +// services in the specified project in the given region. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. It must be a +// string that meets the requirements in RFC1035. +func (r *RegionBackendServicesService) ListUsable(project string, region string) *RegionBackendServicesListUsableCall { + c := &RegionBackendServicesListUsableCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. 
+func (c *RegionBackendServicesListUsableCall) Filter(filter string) *RegionBackendServicesListUsableCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *RegionBackendServicesListUsableCall) MaxResults(maxResults int64) *RegionBackendServicesListUsableCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *RegionBackendServicesListUsableCall) OrderBy(orderBy string) *RegionBackendServicesListUsableCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *RegionBackendServicesListUsableCall) PageToken(pageToken string) *RegionBackendServicesListUsableCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *RegionBackendServicesListUsableCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionBackendServicesListUsableCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionBackendServicesListUsableCall) Fields(s ...googleapi.Field) *RegionBackendServicesListUsableCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionBackendServicesListUsableCall) IfNoneMatch(entityTag string) *RegionBackendServicesListUsableCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *RegionBackendServicesListUsableCall) Context(ctx context.Context) *RegionBackendServicesListUsableCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionBackendServicesListUsableCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionBackendServicesListUsableCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/backendServices/listUsable") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionBackendServices.listUsable" call. +// Exactly one of *BackendServiceListUsable or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *BackendServiceListUsable.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionBackendServicesListUsableCall) Do(opts ...googleapi.CallOption) (*BackendServiceListUsable, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &BackendServiceListUsable{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves an aggregated list of all usable backend services in the specified project in the given region.", + // "flatPath": "projects/{project}/regions/{region}/backendServices/listUsable", + // "httpMethod": "GET", + // "id": "compute.regionBackendServices.listUsable", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. 
The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request. It must be a string that meets the requirements in RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "projects/{project}/regions/{region}/backendServices/listUsable", + // "response": { + // "$ref": "BackendServiceListUsable" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RegionBackendServicesListUsableCall) Pages(ctx context.Context, f func(*BackendServiceListUsable) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + // method id "compute.regionBackendServices.patch": type RegionBackendServicesPatchCall struct { @@ -140440,6 +144757,365 @@ func (c *RegionBackendServicesSetIamPolicyCall) Do(opts ...googleapi.CallOption) } +// method id "compute.regionBackendServices.setSecurityPolicy": + +type RegionBackendServicesSetSecurityPolicyCall struct { + s *Service + project string + region string + backendService string + securitypolicyreference *SecurityPolicyReference + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetSecurityPolicy: Sets the Google Cloud Armor security policy for +// the specified backend service. For more information, see Google Cloud +// Armor Overview +// +// - backendService: Name of the BackendService resource to which the +// security policy should be set. The name should conform to RFC1035. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +func (r *RegionBackendServicesService) SetSecurityPolicy(project string, region string, backendService string, securitypolicyreference *SecurityPolicyReference) *RegionBackendServicesSetSecurityPolicyCall { + c := &RegionBackendServicesSetSecurityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.backendService = backendService + c.securitypolicyreference = securitypolicyreference + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. 
For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *RegionBackendServicesSetSecurityPolicyCall) RequestId(requestId string) *RegionBackendServicesSetSecurityPolicyCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionBackendServicesSetSecurityPolicyCall) Fields(s ...googleapi.Field) *RegionBackendServicesSetSecurityPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionBackendServicesSetSecurityPolicyCall) Context(ctx context.Context) *RegionBackendServicesSetSecurityPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionBackendServicesSetSecurityPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionBackendServicesSetSecurityPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicyreference) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/backendServices/{backendService}/setSecurityPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "backendService": c.backendService, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionBackendServices.setSecurityPolicy" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionBackendServicesSetSecurityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the Google Cloud Armor security policy for the specified backend service. For more information, see Google Cloud Armor Overview", + // "flatPath": "projects/{project}/regions/{region}/backendServices/{backendService}/setSecurityPolicy", + // "httpMethod": "POST", + // "id": "compute.regionBackendServices.setSecurityPolicy", + // "parameterOrder": [ + // "project", + // "region", + // "backendService" + // ], + // "parameters": { + // "backendService": { + // "description": "Name of the BackendService resource to which the security policy should be set. The name should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/backendServices/{backendService}/setSecurityPolicy", + // "request": { + // "$ref": "SecurityPolicyReference" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionBackendServices.testIamPermissions": + +type RegionBackendServicesTestIamPermissionsCall struct { + s *Service + project string + region string + resource string + testpermissionsrequest *TestPermissionsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Returns permissions that a caller has on the +// specified resource. +// +// - project: Project ID for this request. +// - region: The name of the region for this request. 
+// - resource: Name or id of the resource for this request. +func (r *RegionBackendServicesService) TestIamPermissions(project string, region string, resource string, testpermissionsrequest *TestPermissionsRequest) *RegionBackendServicesTestIamPermissionsCall { + c := &RegionBackendServicesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.resource = resource + c.testpermissionsrequest = testpermissionsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionBackendServicesTestIamPermissionsCall) Fields(s ...googleapi.Field) *RegionBackendServicesTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionBackendServicesTestIamPermissionsCall) Context(ctx context.Context) *RegionBackendServicesTestIamPermissionsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionBackendServicesTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionBackendServicesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.testpermissionsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/backendServices/{resource}/testIamPermissions") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "resource": c.resource, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionBackendServices.testIamPermissions" call. +// Exactly one of *TestPermissionsResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TestPermissionsResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionBackendServicesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &TestPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns permissions that a caller has on the specified resource.", + // "flatPath": "projects/{project}/regions/{region}/backendServices/{resource}/testIamPermissions", + // "httpMethod": "POST", + // "id": "compute.regionBackendServices.testIamPermissions", + // "parameterOrder": [ + // "project", + // "region", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/backendServices/{resource}/testIamPermissions", + // "request": { + // "$ref": "TestPermissionsRequest" + // }, + // "response": { + // "$ref": "TestPermissionsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + // method id "compute.regionBackendServices.update": type RegionBackendServicesUpdateCall struct { @@ -140655,22 +145331,21 @@ func (r *RegionCommitmentsService) AggregatedList(project string) *RegionCommitm // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. 
The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -140685,7 +145360,8 @@ func (r *RegionCommitmentsService) AggregatedList(project string) *RegionCommitm // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *RegionCommitmentsAggregatedListCall) Filter(filter string) *RegionCommitmentsAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -140746,6 +145422,13 @@ func (c *RegionCommitmentsAggregatedListCall) ReturnPartialSuccess(returnPartial return c } +// ServiceProjectNumber sets the optional parameter +// "serviceProjectNumber": +func (c *RegionCommitmentsAggregatedListCall) ServiceProjectNumber(serviceProjectNumber int64) *RegionCommitmentsAggregatedListCall { + c.urlParams_.Set("serviceProjectNumber", fmt.Sprint(serviceProjectNumber)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -140854,7 +145537,7 @@ func (c *RegionCommitmentsAggregatedListCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. 
For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -140892,6 +145575,11 @@ func (c *RegionCommitmentsAggregatedListCall) Do(opts ...googleapi.CallOption) ( // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" + // }, + // "serviceProjectNumber": { + // "format": "int64", + // "location": "query", + // "type": "string" // } // }, // "path": "projects/{project}/aggregated/commitments", @@ -141305,22 +145993,21 @@ func (r *RegionCommitmentsService) List(project string, region string) *RegionCo // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -141335,7 +146022,8 @@ func (r *RegionCommitmentsService) List(project string, region string) *RegionCo // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. 
+// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *RegionCommitmentsListCall) Filter(filter string) *RegionCommitmentsListCall { c.urlParams_.Set("filter", filter) return c @@ -141493,7 +146181,7 @@ func (c *RegionCommitmentsListCall) Do(opts ...googleapi.CallOption) (*Commitmen // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. 
For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -141985,22 +146673,21 @@ func (r *RegionDiskTypesService) List(project string, region string) *RegionDisk // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. 
For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -142015,7 +146702,8 @@ func (r *RegionDiskTypesService) List(project string, region string) *RegionDisk // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *RegionDiskTypesListCall) Filter(filter string) *RegionDiskTypesListCall { c.urlParams_.Set("filter", filter) return c @@ -142173,7 +146861,7 @@ func (c *RegionDiskTypesListCall) Do(opts ...googleapi.CallOption) (*RegionDiskT // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -143561,22 +148249,21 @@ func (r *RegionDisksService) List(project string, region string) *RegionDisksLis // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. 
The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -143591,7 +148278,8 @@ func (r *RegionDisksService) List(project string, region string) *RegionDisksLis // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *RegionDisksListCall) Filter(filter string) *RegionDisksListCall { c.urlParams_.Set("filter", filter) return c @@ -143749,7 +148437,7 @@ func (c *RegionDisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -146044,22 +150732,21 @@ func (r *RegionHealthCheckServicesService) List(project string, region string) * // filters resources listed in the response. 
Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -146074,7 +150761,8 @@ func (r *RegionHealthCheckServicesService) List(project string, region string) * // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *RegionHealthCheckServicesListCall) Filter(filter string) *RegionHealthCheckServicesListCall { c.urlParams_.Set("filter", filter) return c @@ -146232,7 +150920,7 @@ func (c *RegionHealthCheckServicesListCall) Do(opts ...googleapi.CallOption) (*H // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -147053,22 +151741,21 @@ func (r *RegionHealthChecksService) List(project string, region string) *RegionH // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. 
For example: ``` @@ -147083,7 +151770,8 @@ func (r *RegionHealthChecksService) List(project string, region string) *RegionH // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *RegionHealthChecksListCall) Filter(filter string) *RegionHealthChecksListCall { c.urlParams_.Set("filter", filter) return c @@ -147241,7 +151929,7 @@ func (c *RegionHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HealthCh // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -149180,22 +153868,21 @@ func (r *RegionInstanceGroupManagersService) List(project string, region string) // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. 
For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -149210,7 +153897,8 @@ func (r *RegionInstanceGroupManagersService) List(project string, region string) // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *RegionInstanceGroupManagersListCall) Filter(filter string) *RegionInstanceGroupManagersListCall { c.urlParams_.Set("filter", filter) return c @@ -149368,7 +154056,7 @@ func (c *RegionInstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -149479,22 +154167,21 @@ func (r *RegionInstanceGroupManagersService) ListErrors(project string, region s // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. 
The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -149509,7 +154196,8 @@ func (r *RegionInstanceGroupManagersService) ListErrors(project string, region s // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *RegionInstanceGroupManagersListErrorsCall) Filter(filter string) *RegionInstanceGroupManagersListErrorsCall { c.urlParams_.Set("filter", filter) return c @@ -149671,7 +154359,7 @@ func (c *RegionInstanceGroupManagersListErrorsCall) Do(opts ...googleapi.CallOpt // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. 
You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -149787,22 +154475,21 @@ func (r *RegionInstanceGroupManagersService) ListManagedInstances(project string // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -149817,7 +154504,8 @@ func (r *RegionInstanceGroupManagersService) ListManagedInstances(project string // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *RegionInstanceGroupManagersListManagedInstancesCall) Filter(filter string) *RegionInstanceGroupManagersListManagedInstancesCall { c.urlParams_.Set("filter", filter) return c @@ -149966,7 +154654,7 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Do(opts ...googlea // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. 
Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -150080,22 +154768,21 @@ func (r *RegionInstanceGroupManagersService) ListPerInstanceConfigs(project stri // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. 
However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -150110,7 +154797,8 @@ func (r *RegionInstanceGroupManagersService) ListPerInstanceConfigs(project stri // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Filter(filter string) *RegionInstanceGroupManagersListPerInstanceConfigsCall { c.urlParams_.Set("filter", filter) return c @@ -150259,7 +154947,7 @@ func (c *RegionInstanceGroupManagersListPerInstanceConfigsCall) Do(opts ...googl // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. 
If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -151885,22 +156573,21 @@ func (r *RegionInstanceGroupsService) List(project string, region string) *Regio // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. 
-// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -151915,7 +156602,8 @@ func (r *RegionInstanceGroupsService) List(project string, region string) *Regio // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *RegionInstanceGroupsListCall) Filter(filter string) *RegionInstanceGroupsListCall { c.urlParams_.Set("filter", filter) return c @@ -152073,7 +156761,7 @@ func (c *RegionInstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*Region // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -152184,22 +156872,21 @@ func (r *RegionInstanceGroupsService) ListInstances(project string, region strin // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. 
If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -152214,7 +156901,8 @@ func (r *RegionInstanceGroupsService) ListInstances(project string, region strin // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *RegionInstanceGroupsListInstancesCall) Filter(filter string) *RegionInstanceGroupsListInstancesCall { c.urlParams_.Set("filter", filter) return c @@ -152367,7 +157055,7 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. 
For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -153196,22 +157884,21 @@ func (r *RegionInstanceTemplatesService) List(project string, region string) *Re // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. 
For example: ``` @@ -153226,7 +157913,8 @@ func (r *RegionInstanceTemplatesService) List(project string, region string) *Re // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *RegionInstanceTemplatesListCall) Filter(filter string) *RegionInstanceTemplatesListCall { c.urlParams_.Set("filter", filter) return c @@ -153384,7 +158072,7 @@ func (c *RegionInstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*Ins // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -153637,6 +158325,196 @@ func (c *RegionInstancesBulkInsertCall) Do(opts ...googleapi.CallOption) (*Opera } +// method id "compute.regionNetworkEndpointGroups.attachNetworkEndpoints": + +type RegionNetworkEndpointGroupsAttachNetworkEndpointsCall struct { + s *Service + project string + region string + networkEndpointGroup string + regionnetworkendpointgroupsattachendpointsrequest *RegionNetworkEndpointGroupsAttachEndpointsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// AttachNetworkEndpoints: Attach a list of network endpoints to the +// specified network endpoint group. +// +// - networkEndpointGroup: The name of the network endpoint group where +// you are attaching network endpoints to. It should comply with +// RFC1035. +// - project: Project ID for this request. +// - region: The name of the region where you want to create the network +// endpoint group. It should comply with RFC1035. +func (r *RegionNetworkEndpointGroupsService) AttachNetworkEndpoints(project string, region string, networkEndpointGroup string, regionnetworkendpointgroupsattachendpointsrequest *RegionNetworkEndpointGroupsAttachEndpointsRequest) *RegionNetworkEndpointGroupsAttachNetworkEndpointsCall { + c := &RegionNetworkEndpointGroupsAttachNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.networkEndpointGroup = networkEndpointGroup + c.regionnetworkendpointgroupsattachendpointsrequest = regionnetworkendpointgroupsattachendpointsrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. 
Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *RegionNetworkEndpointGroupsAttachNetworkEndpointsCall) RequestId(requestId string) *RegionNetworkEndpointGroupsAttachNetworkEndpointsCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionNetworkEndpointGroupsAttachNetworkEndpointsCall) Fields(s ...googleapi.Field) *RegionNetworkEndpointGroupsAttachNetworkEndpointsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionNetworkEndpointGroupsAttachNetworkEndpointsCall) Context(ctx context.Context) *RegionNetworkEndpointGroupsAttachNetworkEndpointsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionNetworkEndpointGroupsAttachNetworkEndpointsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionNetworkEndpointGroupsAttachNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionnetworkendpointgroupsattachendpointsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}/attachNetworkEndpoints") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "networkEndpointGroup": c.networkEndpointGroup, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionNetworkEndpointGroups.attachNetworkEndpoints" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
+func (c *RegionNetworkEndpointGroupsAttachNetworkEndpointsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Attach a list of network endpoints to the specified network endpoint group.", + // "flatPath": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}/attachNetworkEndpoints", + // "httpMethod": "POST", + // "id": "compute.regionNetworkEndpointGroups.attachNetworkEndpoints", + // "parameterOrder": [ + // "project", + // "region", + // "networkEndpointGroup" + // ], + // "parameters": { + // "networkEndpointGroup": { + // "description": "The name of the network endpoint group where you are attaching network endpoints to. It should comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region where you want to create the network endpoint group. It should comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}/attachNetworkEndpoints", + // "request": { + // "$ref": "RegionNetworkEndpointGroupsAttachEndpointsRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.regionNetworkEndpointGroups.delete": type RegionNetworkEndpointGroupsDeleteCall struct { @@ -153817,6 +158695,197 @@ func (c *RegionNetworkEndpointGroupsDeleteCall) Do(opts ...googleapi.CallOption) } +// method id "compute.regionNetworkEndpointGroups.detachNetworkEndpoints": + +type RegionNetworkEndpointGroupsDetachNetworkEndpointsCall struct { + s *Service + project string + region string + networkEndpointGroup string + regionnetworkendpointgroupsdetachendpointsrequest *RegionNetworkEndpointGroupsDetachEndpointsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// DetachNetworkEndpoints: Detach the network endpoint from the +// specified network endpoint group. +// +// - networkEndpointGroup: The name of the network endpoint group you +// are detaching network endpoints from. It should comply with +// RFC1035. +// - project: Project ID for this request. +// - region: The name of the region where the network endpoint group is +// located. It should comply with RFC1035. +func (r *RegionNetworkEndpointGroupsService) DetachNetworkEndpoints(project string, region string, networkEndpointGroup string, regionnetworkendpointgroupsdetachendpointsrequest *RegionNetworkEndpointGroupsDetachEndpointsRequest) *RegionNetworkEndpointGroupsDetachNetworkEndpointsCall { + c := &RegionNetworkEndpointGroupsDetachNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.networkEndpointGroup = networkEndpointGroup + c.regionnetworkendpointgroupsdetachendpointsrequest = regionnetworkendpointgroupsdetachendpointsrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). end_interface: +// MixerMutationRequestBuilder +func (c *RegionNetworkEndpointGroupsDetachNetworkEndpointsCall) RequestId(requestId string) *RegionNetworkEndpointGroupsDetachNetworkEndpointsCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *RegionNetworkEndpointGroupsDetachNetworkEndpointsCall) Fields(s ...googleapi.Field) *RegionNetworkEndpointGroupsDetachNetworkEndpointsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionNetworkEndpointGroupsDetachNetworkEndpointsCall) Context(ctx context.Context) *RegionNetworkEndpointGroupsDetachNetworkEndpointsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionNetworkEndpointGroupsDetachNetworkEndpointsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionNetworkEndpointGroupsDetachNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionnetworkendpointgroupsdetachendpointsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}/detachNetworkEndpoints") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "networkEndpointGroup": c.networkEndpointGroup, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionNetworkEndpointGroups.detachNetworkEndpoints" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionNetworkEndpointGroupsDetachNetworkEndpointsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Detach the network endpoint from the specified network endpoint group.", + // "flatPath": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}/detachNetworkEndpoints", + // "httpMethod": "POST", + // "id": "compute.regionNetworkEndpointGroups.detachNetworkEndpoints", + // "parameterOrder": [ + // "project", + // "region", + // "networkEndpointGroup" + // ], + // "parameters": { + // "networkEndpointGroup": { + // "description": "The name of the network endpoint group you are detaching network endpoints from. It should comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region where the network endpoint group is located. It should comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000). end_interface: MixerMutationRequestBuilder", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}/detachNetworkEndpoints", + // "request": { + // "$ref": "RegionNetworkEndpointGroupsDetachEndpointsRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.regionNetworkEndpointGroups.get": type RegionNetworkEndpointGroupsGetCall struct { @@ -154195,22 +159264,21 @@ func (r *RegionNetworkEndpointGroupsService) List(project string, region string) // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. 
If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -154225,7 +159293,8 @@ func (r *RegionNetworkEndpointGroupsService) List(project string, region string) // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *RegionNetworkEndpointGroupsListCall) Filter(filter string) *RegionNetworkEndpointGroupsListCall { c.urlParams_.Set("filter", filter) return c @@ -154383,7 +159452,7 @@ func (c *RegionNetworkEndpointGroupsListCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. 
For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -154458,6 +159527,299 @@ func (c *RegionNetworkEndpointGroupsListCall) Pages(ctx context.Context, f func( } } +// method id "compute.regionNetworkEndpointGroups.listNetworkEndpoints": + +type RegionNetworkEndpointGroupsListNetworkEndpointsCall struct { + s *Service + project string + region string + networkEndpointGroup string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// ListNetworkEndpoints: Lists the network endpoints in the specified +// network endpoint group. +// +// - networkEndpointGroup: The name of the network endpoint group from +// which you want to generate a list of included network endpoints. It +// should comply with RFC1035. +// - project: Project ID for this request. +// - region: The name of the region where the network endpoint group is +// located. It should comply with RFC1035. +func (r *RegionNetworkEndpointGroupsService) ListNetworkEndpoints(project string, region string, networkEndpointGroup string) *RegionNetworkEndpointGroupsListNetworkEndpointsCall { + c := &RegionNetworkEndpointGroupsListNetworkEndpointsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.networkEndpointGroup = networkEndpointGroup + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. 
For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. +func (c *RegionNetworkEndpointGroupsListNetworkEndpointsCall) Filter(filter string) *RegionNetworkEndpointGroupsListNetworkEndpointsCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *RegionNetworkEndpointGroupsListNetworkEndpointsCall) MaxResults(maxResults int64) *RegionNetworkEndpointGroupsListNetworkEndpointsCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *RegionNetworkEndpointGroupsListNetworkEndpointsCall) OrderBy(orderBy string) *RegionNetworkEndpointGroupsListNetworkEndpointsCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *RegionNetworkEndpointGroupsListNetworkEndpointsCall) PageToken(pageToken string) *RegionNetworkEndpointGroupsListNetworkEndpointsCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. 
+func (c *RegionNetworkEndpointGroupsListNetworkEndpointsCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionNetworkEndpointGroupsListNetworkEndpointsCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionNetworkEndpointGroupsListNetworkEndpointsCall) Fields(s ...googleapi.Field) *RegionNetworkEndpointGroupsListNetworkEndpointsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionNetworkEndpointGroupsListNetworkEndpointsCall) Context(ctx context.Context) *RegionNetworkEndpointGroupsListNetworkEndpointsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionNetworkEndpointGroupsListNetworkEndpointsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionNetworkEndpointGroupsListNetworkEndpointsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "networkEndpointGroup": c.networkEndpointGroup, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionNetworkEndpointGroups.listNetworkEndpoints" call. +// Exactly one of *NetworkEndpointGroupsListNetworkEndpoints or error +// will be non-nil. Any non-2xx status code is an error. Response +// headers are in either +// *NetworkEndpointGroupsListNetworkEndpoints.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *RegionNetworkEndpointGroupsListNetworkEndpointsCall) Do(opts ...googleapi.CallOption) (*NetworkEndpointGroupsListNetworkEndpoints, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &NetworkEndpointGroupsListNetworkEndpoints{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists the network endpoints in the specified network endpoint group.", + // "flatPath": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints", + // "httpMethod": "POST", + // "id": "compute.regionNetworkEndpointGroups.listNetworkEndpoints", + // "parameterOrder": [ + // "project", + // "region", + // "networkEndpointGroup" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. 
You cannot combine constraints on multiple fields using regular expressions.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "networkEndpointGroup": { + // "description": "The name of the network endpoint group from which you want to generate a list of included network endpoints. It should comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region where the network endpoint group is located. It should comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}/listNetworkEndpoints", + // "response": { + // "$ref": "NetworkEndpointGroupsListNetworkEndpoints" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
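+//
+// A minimal sketch of paging through all results (assumes `c` is an
+// already-configured call and `ctx` a valid context.Context):
+//
+//	err := c.Pages(ctx, func(page *NetworkEndpointGroupsListNetworkEndpoints) error {
+//		// inspect page here (for example, page.Items)
+//		return nil
+//	})
+//	if err != nil {
+//		// handle the error
+//	}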
+func (c *RegionNetworkEndpointGroupsListNetworkEndpointsCall) Pages(ctx context.Context, f func(*NetworkEndpointGroupsListNetworkEndpoints) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + // method id "compute.regionNetworkFirewallPolicies.addAssociation": type RegionNetworkFirewallPoliciesAddAssociationCall struct { @@ -156354,22 +161716,21 @@ func (r *RegionNetworkFirewallPoliciesService) List(project string, region strin // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -156384,7 +161745,8 @@ func (r *RegionNetworkFirewallPoliciesService) List(project string, region strin // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. 
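+//
+// A minimal sketch of setting such a filter on the list call (assumes
+// `c` is a *RegionNetworkFirewallPoliciesListCall obtained from the
+// service):
+//
+//	c = c.Filter("name != example-policy")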
func (c *RegionNetworkFirewallPoliciesListCall) Filter(filter string) *RegionNetworkFirewallPoliciesListCall { c.urlParams_.Set("filter", filter) return c @@ -156542,7 +161904,7 @@ func (c *RegionNetworkFirewallPoliciesListCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -158285,22 +163647,21 @@ func (r *RegionNotificationEndpointsService) List(project string, region string) // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. 
You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -158315,7 +163676,8 @@ func (r *RegionNotificationEndpointsService) List(project string, region string) // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *RegionNotificationEndpointsListCall) Filter(filter string) *RegionNotificationEndpointsListCall { c.urlParams_.Set("filter", filter) return c @@ -158473,7 +163835,7 @@ func (c *RegionNotificationEndpointsListCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. 
Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -158878,22 +164240,21 @@ func (r *RegionOperationsService) List(project string, region string) *RegionOpe // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. 
For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -158908,7 +164269,8 @@ func (r *RegionOperationsService) List(project string, region string) *RegionOpe // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *RegionOperationsListCall) Filter(filter string) *RegionOperationsListCall { c.urlParams_.Set("filter", filter) return c @@ -159066,7 +164428,7 @@ func (c *RegionOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. 
You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -159311,6 +164673,185 @@ func (c *RegionOperationsWaitCall) Do(opts ...googleapi.CallOption) (*Operation, } +// method id "compute.regionSecurityPolicies.addRule": + +type RegionSecurityPoliciesAddRuleCall struct { + s *Service + project string + region string + securityPolicy string + securitypolicyrule *SecurityPolicyRule + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// AddRule: Inserts a rule into a security policy. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - securityPolicy: Name of the security policy to update. +func (r *RegionSecurityPoliciesService) AddRule(project string, region string, securityPolicy string, securitypolicyrule *SecurityPolicyRule) *RegionSecurityPoliciesAddRuleCall { + c := &RegionSecurityPoliciesAddRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.securityPolicy = securityPolicy + c.securitypolicyrule = securitypolicyrule + return c +} + +// ValidateOnly sets the optional parameter "validateOnly": If true, the +// request will not be committed. +func (c *RegionSecurityPoliciesAddRuleCall) ValidateOnly(validateOnly bool) *RegionSecurityPoliciesAddRuleCall { + c.urlParams_.Set("validateOnly", fmt.Sprint(validateOnly)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionSecurityPoliciesAddRuleCall) Fields(s ...googleapi.Field) *RegionSecurityPoliciesAddRuleCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionSecurityPoliciesAddRuleCall) Context(ctx context.Context) *RegionSecurityPoliciesAddRuleCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionSecurityPoliciesAddRuleCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionSecurityPoliciesAddRuleCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicyrule) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}/addRule") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "securityPolicy": c.securityPolicy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionSecurityPolicies.addRule" call. 
+// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionSecurityPoliciesAddRuleCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Inserts a rule into a security policy.", + // "flatPath": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}/addRule", + // "httpMethod": "POST", + // "id": "compute.regionSecurityPolicies.addRule", + // "parameterOrder": [ + // "project", + // "region", + // "securityPolicy" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "securityPolicy": { + // "description": "Name of the security policy to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "validateOnly": { + // "description": "If true, the request will not be committed.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}/addRule", + // "request": { + // "$ref": "SecurityPolicyRule" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.regionSecurityPolicies.delete": type RegionSecurityPoliciesDeleteCall struct { @@ -159662,6 +165203,192 @@ func (c *RegionSecurityPoliciesGetCall) Do(opts ...googleapi.CallOption) (*Secur } +// method id "compute.regionSecurityPolicies.getRule": + +type RegionSecurityPoliciesGetRuleCall struct { + s *Service + project string + region string + securityPolicy string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetRule: Gets a rule at the specified priority. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - securityPolicy: Name of the security policy to which the queried +// rule belongs. 
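+//
+// A minimal sketch (hypothetical project, region, and policy names;
+// assumes `svc` is an initialized *Service exposing a
+// RegionSecurityPolicies accessor, as the generated clients usually do):
+//
+//	rule, err := svc.RegionSecurityPolicies.GetRule("my-project", "us-central1", "my-policy").
+//		Priority(1000).
+//		Do()
+//	if err != nil {
+//		// handle the error
+//	}
+//	_ = rule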
+func (r *RegionSecurityPoliciesService) GetRule(project string, region string, securityPolicy string) *RegionSecurityPoliciesGetRuleCall { + c := &RegionSecurityPoliciesGetRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.securityPolicy = securityPolicy + return c +} + +// Priority sets the optional parameter "priority": The priority of the +// rule to get from the security policy. +func (c *RegionSecurityPoliciesGetRuleCall) Priority(priority int64) *RegionSecurityPoliciesGetRuleCall { + c.urlParams_.Set("priority", fmt.Sprint(priority)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionSecurityPoliciesGetRuleCall) Fields(s ...googleapi.Field) *RegionSecurityPoliciesGetRuleCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionSecurityPoliciesGetRuleCall) IfNoneMatch(entityTag string) *RegionSecurityPoliciesGetRuleCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionSecurityPoliciesGetRuleCall) Context(ctx context.Context) *RegionSecurityPoliciesGetRuleCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionSecurityPoliciesGetRuleCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionSecurityPoliciesGetRuleCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}/getRule") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "securityPolicy": c.securityPolicy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionSecurityPolicies.getRule" call. +// Exactly one of *SecurityPolicyRule or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *SecurityPolicyRule.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. 
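+//
+// A minimal sketch of the not-modified check described above (assumes
+// `c` is a configured call and `etag` holds a previously returned ETag):
+//
+//	rule, err := c.IfNoneMatch(etag).Do()
+//	switch {
+//	case googleapi.IsNotModified(err):
+//		// the cached copy is still current
+//	case err != nil:
+//		// handle other errors
+//	default:
+//		_ = rule // use the freshly fetched rule
+//	}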
+func (c *RegionSecurityPoliciesGetRuleCall) Do(opts ...googleapi.CallOption) (*SecurityPolicyRule, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &SecurityPolicyRule{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets a rule at the specified priority.", + // "flatPath": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}/getRule", + // "httpMethod": "GET", + // "id": "compute.regionSecurityPolicies.getRule", + // "parameterOrder": [ + // "project", + // "region", + // "securityPolicy" + // ], + // "parameters": { + // "priority": { + // "description": "The priority of the rule to get from the security policy.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "securityPolicy": { + // "description": "Name of the security policy to which the queried rule belongs.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}/getRule", + // "response": { + // "$ref": "SecurityPolicyRule" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + // method id "compute.regionSecurityPolicies.insert": type RegionSecurityPoliciesInsertCall struct { @@ -159879,22 +165606,21 @@ func (r *RegionSecurityPoliciesService) List(project string, region string) *Reg // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. 
For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -159909,7 +165635,8 @@ func (r *RegionSecurityPoliciesService) List(project string, region string) *Reg // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *RegionSecurityPoliciesListCall) Filter(filter string) *RegionSecurityPoliciesListCall { c.urlParams_.Set("filter", filter) return c @@ -160067,7 +165794,7 @@ func (c *RegionSecurityPoliciesListCall) Do(opts ...googleapi.CallOption) (*Secu // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. 
You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -160190,6 +165917,13 @@ func (c *RegionSecurityPoliciesPatchCall) RequestId(requestId string) *RegionSec return c } +// UpdateMask sets the optional parameter "updateMask": Indicates fields +// to be cleared as part of this request. +func (c *RegionSecurityPoliciesPatchCall) UpdateMask(updateMask string) *RegionSecurityPoliciesPatchCall { + c.urlParams_.Set("updateMask", updateMask) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -160318,6 +166052,12 @@ func (c *RegionSecurityPoliciesPatchCall) Do(opts ...googleapi.CallOption) (*Ope // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" + // }, + // "updateMask": { + // "description": "Indicates fields to be cleared as part of this request.", + // "format": "google-fieldmask", + // "location": "query", + // "type": "string" // } // }, // "path": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}", @@ -160335,6 +166075,383 @@ func (c *RegionSecurityPoliciesPatchCall) Do(opts ...googleapi.CallOption) (*Ope } +// method id "compute.regionSecurityPolicies.patchRule": + +type RegionSecurityPoliciesPatchRuleCall struct { + s *Service + project string + region string + securityPolicy string + securitypolicyrule *SecurityPolicyRule + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// PatchRule: Patches a rule at the specified priority. To clear fields +// in the rule, leave the fields empty and specify them in the +// updateMask. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - securityPolicy: Name of the security policy to update. +func (r *RegionSecurityPoliciesService) PatchRule(project string, region string, securityPolicy string, securitypolicyrule *SecurityPolicyRule) *RegionSecurityPoliciesPatchRuleCall { + c := &RegionSecurityPoliciesPatchRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.securityPolicy = securityPolicy + c.securitypolicyrule = securitypolicyrule + return c +} + +// Priority sets the optional parameter "priority": The priority of the +// rule to patch. +func (c *RegionSecurityPoliciesPatchRuleCall) Priority(priority int64) *RegionSecurityPoliciesPatchRuleCall { + c.urlParams_.Set("priority", fmt.Sprint(priority)) + return c +} + +// UpdateMask sets the optional parameter "updateMask": Indicates fields +// to be cleared as part of this request. +func (c *RegionSecurityPoliciesPatchRuleCall) UpdateMask(updateMask string) *RegionSecurityPoliciesPatchRuleCall { + c.urlParams_.Set("updateMask", updateMask) + return c +} + +// ValidateOnly sets the optional parameter "validateOnly": If true, the +// request will not be committed. +func (c *RegionSecurityPoliciesPatchRuleCall) ValidateOnly(validateOnly bool) *RegionSecurityPoliciesPatchRuleCall { + c.urlParams_.Set("validateOnly", fmt.Sprint(validateOnly)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
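+//
+// For context, a minimal sketch of a complete PatchRule invocation with
+// a partial response (hypothetical names and field mask; assumes `svc`
+// is an initialized *Service and `rule` carries the new field values):
+//
+//	op, err := svc.RegionSecurityPolicies.PatchRule("my-project", "us-central1", "my-policy", rule).
+//		Priority(1000).
+//		UpdateMask("description").
+//		Fields("name", "status").
+//		Do()
+//	if err != nil {
+//		// handle the error
+//	}
+//	_ = op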
+func (c *RegionSecurityPoliciesPatchRuleCall) Fields(s ...googleapi.Field) *RegionSecurityPoliciesPatchRuleCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionSecurityPoliciesPatchRuleCall) Context(ctx context.Context) *RegionSecurityPoliciesPatchRuleCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionSecurityPoliciesPatchRuleCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionSecurityPoliciesPatchRuleCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicyrule) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}/patchRule") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "securityPolicy": c.securityPolicy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionSecurityPolicies.patchRule" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionSecurityPoliciesPatchRuleCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Patches a rule at the specified priority. 
To clear fields in the rule, leave the fields empty and specify them in the updateMask.", + // "flatPath": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}/patchRule", + // "httpMethod": "POST", + // "id": "compute.regionSecurityPolicies.patchRule", + // "parameterOrder": [ + // "project", + // "region", + // "securityPolicy" + // ], + // "parameters": { + // "priority": { + // "description": "The priority of the rule to patch.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "securityPolicy": { + // "description": "Name of the security policy to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "updateMask": { + // "description": "Indicates fields to be cleared as part of this request.", + // "format": "google-fieldmask", + // "location": "query", + // "type": "string" + // }, + // "validateOnly": { + // "description": "If true, the request will not be committed.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}/patchRule", + // "request": { + // "$ref": "SecurityPolicyRule" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionSecurityPolicies.removeRule": + +type RegionSecurityPoliciesRemoveRuleCall struct { + s *Service + project string + region string + securityPolicy string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// RemoveRule: Deletes a rule at the specified priority. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - securityPolicy: Name of the security policy to update. +func (r *RegionSecurityPoliciesService) RemoveRule(project string, region string, securityPolicy string) *RegionSecurityPoliciesRemoveRuleCall { + c := &RegionSecurityPoliciesRemoveRuleCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.securityPolicy = securityPolicy + return c +} + +// Priority sets the optional parameter "priority": The priority of the +// rule to remove from the security policy. +func (c *RegionSecurityPoliciesRemoveRuleCall) Priority(priority int64) *RegionSecurityPoliciesRemoveRuleCall { + c.urlParams_.Set("priority", fmt.Sprint(priority)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionSecurityPoliciesRemoveRuleCall) Fields(s ...googleapi.Field) *RegionSecurityPoliciesRemoveRuleCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. 
Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionSecurityPoliciesRemoveRuleCall) Context(ctx context.Context) *RegionSecurityPoliciesRemoveRuleCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionSecurityPoliciesRemoveRuleCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionSecurityPoliciesRemoveRuleCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}/removeRule") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "securityPolicy": c.securityPolicy, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionSecurityPolicies.removeRule" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionSecurityPoliciesRemoveRuleCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes a rule at the specified priority.", + // "flatPath": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}/removeRule", + // "httpMethod": "POST", + // "id": "compute.regionSecurityPolicies.removeRule", + // "parameterOrder": [ + // "project", + // "region", + // "securityPolicy" + // ], + // "parameters": { + // "priority": { + // "description": "The priority of the rule to remove from the security policy.", + // "format": "int32", + // "location": "query", + // "type": "integer" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "securityPolicy": { + // "description": "Name of the security policy to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/securityPolicies/{securityPolicy}/removeRule", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.regionSslCertificates.delete": type RegionSslCertificatesDeleteCall struct { @@ -160892,22 +167009,21 @@ func (r *RegionSslCertificatesService) List(project string, region string) *Regi // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. 
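For orientation, the regionSecurityPolicies.patchRule and removeRule calls added above follow the usual call-builder pattern of this generated package: build the call from the service, chain the optional query parameters (Priority, UpdateMask, ValidateOnly), then execute with Do. Below is a minimal, hypothetical sketch, assuming the standard compute/v1 import path and Application Default Credentials; the project, region, policy, priority, and rule values are illustrative placeholders that do not come from this patch.

// Illustrative only; names and values are placeholders, not taken from this patch.
package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // uses Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}

	// Patch the rule at priority 1000. Per the method description, fields that
	// are named in updateMask and left empty in the request body are cleared.
	rule := &compute.SecurityPolicyRule{
		Action: "deny(403)", // placeholder field value
	}
	op, err := svc.RegionSecurityPolicies.
		PatchRule("my-project", "us-central1", "my-policy", rule).
		Priority(1000).
		UpdateMask("description").
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("operation %s: %s", op.Name, op.Status)
}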
The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -160922,7 +167038,8 @@ func (r *RegionSslCertificatesService) List(project string, region string) *Regi // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *RegionSslCertificatesListCall) Filter(filter string) *RegionSslCertificatesListCall { c.urlParams_.Set("filter", filter) return c @@ -161080,7 +167197,7 @@ func (c *RegionSslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCe // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. 
However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -161714,22 +167831,21 @@ func (r *RegionSslPoliciesService) List(project string, region string) *RegionSs // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. 
If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -161744,7 +167860,8 @@ func (r *RegionSslPoliciesService) List(project string, region string) *RegionSs // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *RegionSslPoliciesListCall) Filter(filter string) *RegionSslPoliciesListCall { c.urlParams_.Set("filter", filter) return c @@ -161902,7 +168019,7 @@ func (c *RegionSslPoliciesListCall) Do(opts ...googleapi.CallOption) (*SslPolici // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. 
For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -162006,22 +168123,21 @@ func (r *RegionSslPoliciesService) ListAvailableFeatures(project string, region // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. 
For example: ``` @@ -162036,7 +168152,8 @@ func (r *RegionSslPoliciesService) ListAvailableFeatures(project string, region // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *RegionSslPoliciesListAvailableFeaturesCall) Filter(filter string) *RegionSslPoliciesListAvailableFeaturesCall { c.urlParams_.Set("filter", filter) return c @@ -162196,7 +168313,7 @@ func (c *RegionSslPoliciesListAvailableFeaturesCall) Do(opts ...googleapi.CallOp // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. 
The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -162996,22 +169113,21 @@ func (r *RegionTargetHttpProxiesService) List(project string, region string) *Re // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. 
To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -163026,7 +169142,8 @@ func (r *RegionTargetHttpProxiesService) List(project string, region string) *Re // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *RegionTargetHttpProxiesListCall) Filter(filter string) *RegionTargetHttpProxiesListCall { c.urlParams_.Set("filter", filter) return c @@ -163184,7 +169301,7 @@ func (c *RegionTargetHttpProxiesListCall) Do(opts ...googleapi.CallOption) (*Tar // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -164004,22 +170121,21 @@ func (r *RegionTargetHttpsProxiesService) List(project string, region string) *R // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. 
The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -164034,7 +170150,8 @@ func (r *RegionTargetHttpsProxiesService) List(project string, region string) *R // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *RegionTargetHttpsProxiesListCall) Filter(filter string) *RegionTargetHttpsProxiesListCall { c.urlParams_.Set("filter", filter) return c @@ -164192,7 +170309,7 @@ func (c *RegionTargetHttpsProxiesListCall) Do(opts ...googleapi.CallOption) (*Ta // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. 
You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -165391,22 +171508,21 @@ func (r *RegionTargetTcpProxiesService) List(project string, region string) *Reg // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -165421,7 +171537,8 @@ func (r *RegionTargetTcpProxiesService) List(project string, region string) *Reg // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *RegionTargetTcpProxiesListCall) Filter(filter string) *RegionTargetTcpProxiesListCall { c.urlParams_.Set("filter", filter) return c @@ -165579,7 +171696,7 @@ func (c *RegionTargetTcpProxiesListCall) Do(opts ...googleapi.CallOption) (*Targ // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. 
Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -166192,22 +172309,21 @@ func (r *RegionUrlMapsService) List(project string, region string) *RegionUrlMap // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. 
However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -166222,7 +172338,8 @@ func (r *RegionUrlMapsService) List(project string, region string) *RegionUrlMap // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *RegionUrlMapsListCall) Filter(filter string) *RegionUrlMapsListCall { c.urlParams_.Set("filter", filter) return c @@ -166380,7 +172497,7 @@ func (c *RegionUrlMapsListCall) Do(opts ...googleapi.CallOption) (*UrlMapList, e // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. 
The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -167185,22 +173302,21 @@ func (r *RegionsService) List(project string) *RegionsListCall { // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. 
To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -167215,7 +173331,8 @@ func (r *RegionsService) List(project string) *RegionsListCall { // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *RegionsListCall) Filter(filter string) *RegionsListCall { c.urlParams_.Set("filter", filter) return c @@ -167371,7 +173488,7 @@ func (c *RegionsListCall) Do(opts ...googleapi.CallOption) (*RegionList, error) // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -167464,22 +173581,21 @@ func (r *ReservationsService) AggregatedList(project string) *ReservationsAggreg // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. 
The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -167494,7 +173610,8 @@ func (r *ReservationsService) AggregatedList(project string) *ReservationsAggreg // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *ReservationsAggregatedListCall) Filter(filter string) *ReservationsAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -167555,6 +173672,13 @@ func (c *ReservationsAggregatedListCall) ReturnPartialSuccess(returnPartialSucce return c } +// ServiceProjectNumber sets the optional parameter +// "serviceProjectNumber": +func (c *ReservationsAggregatedListCall) ServiceProjectNumber(serviceProjectNumber int64) *ReservationsAggregatedListCall { + c.urlParams_.Set("serviceProjectNumber", fmt.Sprint(serviceProjectNumber)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -167663,7 +173787,7 @@ func (c *ReservationsAggregatedListCall) Do(opts ...googleapi.CallOption) (*Rese // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
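The hunks above also add a `ServiceProjectNumber` setter to the aggregated list calls. The discovery document in this revision carries no description for the parameter, so the sketch below only shows how the new setter chains onto a `ReservationsAggregatedListCall`; the project name and number are placeholders, not documented semantics.

```
package main

import (
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// listReservations is a sketch only: serviceProjectNumber's meaning is not
// described in this revision of the discovery document, so the value passed
// here is purely illustrative.
func listReservations(svc *compute.Service, project string, serviceProjectNumber int64) error {
	agg, err := svc.Reservations.AggregatedList(project).
		ServiceProjectNumber(serviceProjectNumber). // new optional int64 query parameter
		ReturnPartialSuccess(true).
		Do()
	if err != nil {
		return err
	}
	fmt.Printf("reservation scopes returned: %d\n", len(agg.Items))
	return nil
}
```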
The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -167701,6 +173825,11 @@ func (c *ReservationsAggregatedListCall) Do(opts ...googleapi.CallOption) (*Rese // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" + // }, + // "serviceProjectNumber": { + // "format": "int64", + // "location": "query", + // "type": "string" // } // }, // "path": "projects/{project}/aggregated/reservations", @@ -168478,22 +174607,21 @@ func (r *ReservationsService) List(project string, zone string) *ReservationsLis // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. 
To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -168508,7 +174636,8 @@ func (r *ReservationsService) List(project string, zone string) *ReservationsLis // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *ReservationsListCall) Filter(filter string) *ReservationsListCall { c.urlParams_.Set("filter", filter) return c @@ -168666,7 +174795,7 @@ func (c *ReservationsListCall) Do(opts ...googleapi.CallOption) (*ReservationLis // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. 
Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -169505,22 +175634,21 @@ func (r *ResourcePoliciesService) AggregatedList(project string) *ResourcePolici // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. 
For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -169535,7 +175663,8 @@ func (r *ResourcePoliciesService) AggregatedList(project string) *ResourcePolici // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *ResourcePoliciesAggregatedListCall) Filter(filter string) *ResourcePoliciesAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -169596,6 +175725,13 @@ func (c *ResourcePoliciesAggregatedListCall) ReturnPartialSuccess(returnPartialS return c } +// ServiceProjectNumber sets the optional parameter +// "serviceProjectNumber": +func (c *ResourcePoliciesAggregatedListCall) ServiceProjectNumber(serviceProjectNumber int64) *ResourcePoliciesAggregatedListCall { + c.urlParams_.Set("serviceProjectNumber", fmt.Sprint(serviceProjectNumber)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -169704,7 +175840,7 @@ func (c *ResourcePoliciesAggregatedListCall) Do(opts ...googleapi.CallOption) (* // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. 
For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -169742,6 +175878,11 @@ func (c *ResourcePoliciesAggregatedListCall) Do(opts ...googleapi.CallOption) (* // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" + // }, + // "serviceProjectNumber": { + // "format": "int64", + // "location": "query", + // "type": "string" // } // }, // "path": "projects/{project}/aggregated/resourcePolicies", @@ -170518,22 +176659,21 @@ func (r *ResourcePoliciesService) List(project string, region string) *ResourceP // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -170548,7 +176688,8 @@ func (r *ResourcePoliciesService) List(project string, region string) *ResourceP // regular expression using Google RE2 library syntax. The literal value // must match the entire field. 
For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *ResourcePoliciesListCall) Filter(filter string) *ResourcePoliciesListCall { c.urlParams_.Set("filter", filter) return c @@ -170706,7 +176847,7 @@ func (c *ResourcePoliciesListCall) Do(opts ...googleapi.CallOption) (*ResourcePo // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. 
The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -171344,22 +177485,21 @@ func (r *RoutersService) AggregatedList(project string) *RoutersAggregatedListCa // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. 
For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -171374,7 +177514,8 @@ func (r *RoutersService) AggregatedList(project string) *RoutersAggregatedListCa // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *RoutersAggregatedListCall) Filter(filter string) *RoutersAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -171435,6 +177576,13 @@ func (c *RoutersAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bo return c } +// ServiceProjectNumber sets the optional parameter +// "serviceProjectNumber": +func (c *RoutersAggregatedListCall) ServiceProjectNumber(serviceProjectNumber int64) *RoutersAggregatedListCall { + c.urlParams_.Set("serviceProjectNumber", fmt.Sprint(serviceProjectNumber)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -171543,7 +177691,7 @@ func (c *RoutersAggregatedListCall) Do(opts ...googleapi.CallOption) (*RouterAgg // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -171581,6 +177729,11 @@ func (c *RoutersAggregatedListCall) Do(opts ...googleapi.CallOption) (*RouterAgg // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", // "location": "query", // "type": "boolean" + // }, + // "serviceProjectNumber": { + // "format": "int64", + // "location": "query", + // "type": "string" // } // }, // "path": "projects/{project}/aggregated/routers", @@ -171967,6 +178120,191 @@ func (c *RoutersGetCall) Do(opts ...googleapi.CallOption) (*Router, error) { } +// method id "compute.routers.getNatIpInfo": + +type RoutersGetNatIpInfoCall struct { + s *Service + project string + region string + router string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetNatIpInfo: Retrieves runtime NAT IP information. +// +// - project: Project ID for this request. +// - region: Name of the region for this request. +// - router: Name of the Router resource to query for Nat IP +// information. The name should conform to RFC1035. +func (r *RoutersService) GetNatIpInfo(project string, region string, router string) *RoutersGetNatIpInfoCall { + c := &RoutersGetNatIpInfoCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.router = router + return c +} + +// NatName sets the optional parameter "natName": Name of the nat +// service to filter the NAT IP information. If it is omitted, all nats +// for this router will be returned. Name should conform to RFC1035. +func (c *RoutersGetNatIpInfoCall) NatName(natName string) *RoutersGetNatIpInfoCall { + c.urlParams_.Set("natName", natName) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RoutersGetNatIpInfoCall) Fields(s ...googleapi.Field) *RoutersGetNatIpInfoCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RoutersGetNatIpInfoCall) IfNoneMatch(entityTag string) *RoutersGetNatIpInfoCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RoutersGetNatIpInfoCall) Context(ctx context.Context) *RoutersGetNatIpInfoCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RoutersGetNatIpInfoCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RoutersGetNatIpInfoCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/routers/{router}/getNatIpInfo") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "router": c.router, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.routers.getNatIpInfo" call. +// Exactly one of *NatIpInfoResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *NatIpInfoResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RoutersGetNatIpInfoCall) Do(opts ...googleapi.CallOption) (*NatIpInfoResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &NatIpInfoResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves runtime NAT IP information.", + // "flatPath": "projects/{project}/regions/{region}/routers/{router}/getNatIpInfo", + // "httpMethod": "GET", + // "id": "compute.routers.getNatIpInfo", + // "parameterOrder": [ + // "project", + // "region", + // "router" + // ], + // "parameters": { + // "natName": { + // "description": "Name of the nat service to filter the NAT IP information. If it is omitted, all nats for this router will be returned. Name should conform to RFC1035.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "router": { + // "description": "Name of the Router resource to query for Nat IP information. The name should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/routers/{router}/getNatIpInfo", + // "response": { + // "$ref": "NatIpInfoResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + // method id "compute.routers.getNatMappingInfo": type RoutersGetNatMappingInfoCall struct { @@ -171999,22 +178337,21 @@ func (r *RoutersService) GetNatMappingInfo(project string, region string, router // filters resources listed in the response. 
Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -172029,7 +178366,8 @@ func (r *RoutersService) GetNatMappingInfo(project string, region string, router // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *RoutersGetNatMappingInfoCall) Filter(filter string) *RoutersGetNatMappingInfoCall { c.urlParams_.Set("filter", filter) return c @@ -172198,7 +178536,7 @@ func (c *RoutersGetNatMappingInfoCall) Do(opts ...googleapi.CallOption) (*VmEndp // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -172664,22 +179002,21 @@ func (r *RoutersService) List(project string, region string) *RoutersListCall { // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. 
For example: ``` @@ -172694,7 +179031,8 @@ func (r *RoutersService) List(project string, region string) *RoutersListCall { // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *RoutersListCall) Filter(filter string) *RoutersListCall { c.urlParams_.Set("filter", filter) return c @@ -172852,7 +179190,7 @@ func (c *RoutersListCall) Do(opts ...googleapi.CallOption) (*RouterList, error) // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -173996,22 +180334,21 @@ func (r *RoutesService) List(project string) *RoutesListCall { // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. 
For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -174026,7 +180363,8 @@ func (r *RoutesService) List(project string) *RoutesListCall { // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *RoutesListCall) Filter(filter string) *RoutesListCall { c.urlParams_.Set("filter", filter) return c @@ -174182,7 +180520,7 @@ func (c *RoutesListCall) Do(opts ...googleapi.CallOption) (*RouteList, error) { // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -174443,22 +180781,21 @@ func (r *SecurityPoliciesService) AggregatedList(project string) *SecurityPolici // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. 
The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -174473,7 +180810,8 @@ func (r *SecurityPoliciesService) AggregatedList(project string) *SecurityPolici // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *SecurityPoliciesAggregatedListCall) Filter(filter string) *SecurityPoliciesAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -174534,6 +180872,13 @@ func (c *SecurityPoliciesAggregatedListCall) ReturnPartialSuccess(returnPartialS return c } +// ServiceProjectNumber sets the optional parameter +// "serviceProjectNumber": +func (c *SecurityPoliciesAggregatedListCall) ServiceProjectNumber(serviceProjectNumber int64) *SecurityPoliciesAggregatedListCall { + c.urlParams_.Set("serviceProjectNumber", fmt.Sprint(serviceProjectNumber)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -174642,7 +180987,7 @@ func (c *SecurityPoliciesAggregatedListCall) Do(opts ...googleapi.CallOption) (* // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -174680,6 +181025,11 @@ func (c *SecurityPoliciesAggregatedListCall) Do(opts ...googleapi.CallOption) (* // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" + // }, + // "serviceProjectNumber": { + // "format": "int64", + // "location": "query", + // "type": "string" // } // }, // "path": "projects/{project}/aggregated/securityPolicies", @@ -175419,22 +181769,21 @@ func (r *SecurityPoliciesService) List(project string) *SecurityPoliciesListCall // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. 
You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -175449,7 +181798,8 @@ func (r *SecurityPoliciesService) List(project string) *SecurityPoliciesListCall // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *SecurityPoliciesListCall) Filter(filter string) *SecurityPoliciesListCall { c.urlParams_.Set("filter", filter) return c @@ -175605,7 +181955,7 @@ func (c *SecurityPoliciesListCall) Do(opts ...googleapi.CallOption) (*SecurityPo // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. 
Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -175699,22 +182049,21 @@ func (r *SecurityPoliciesService) ListPreconfiguredExpressionSets(project string // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. 
For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -175729,7 +182078,8 @@ func (r *SecurityPoliciesService) ListPreconfiguredExpressionSets(project string // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) Filter(filter string) *SecurityPoliciesListPreconfiguredExpressionSetsCall { c.urlParams_.Set("filter", filter) return c @@ -175888,7 +182238,7 @@ func (c *SecurityPoliciesListPreconfiguredExpressionSetsCall) Do(opts ...googlea // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -175980,6 +182330,13 @@ func (c *SecurityPoliciesPatchCall) RequestId(requestId string) *SecurityPolicie return c } +// UpdateMask sets the optional parameter "updateMask": Indicates fields +// to be cleared as part of this request. 
+func (c *SecurityPoliciesPatchCall) UpdateMask(updateMask string) *SecurityPoliciesPatchCall { + c.urlParams_.Set("updateMask", updateMask) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -176099,6 +182456,12 @@ func (c *SecurityPoliciesPatchCall) Do(opts ...googleapi.CallOption) (*Operation // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" + // }, + // "updateMask": { + // "description": "Indicates fields to be cleared as part of this request.", + // "format": "google-fieldmask", + // "location": "query", + // "type": "string" // } // }, // "path": "projects/{project}/global/securityPolicies/{securityPolicy}", @@ -176149,6 +182512,13 @@ func (c *SecurityPoliciesPatchRuleCall) Priority(priority int64) *SecurityPolici return c } +// UpdateMask sets the optional parameter "updateMask": Indicates fields +// to be cleared as part of this request. +func (c *SecurityPoliciesPatchRuleCall) UpdateMask(updateMask string) *SecurityPoliciesPatchRuleCall { + c.urlParams_.Set("updateMask", updateMask) + return c +} + // ValidateOnly sets the optional parameter "validateOnly": If true, the // request will not be committed. func (c *SecurityPoliciesPatchRuleCall) ValidateOnly(validateOnly bool) *SecurityPoliciesPatchRuleCall { @@ -176277,6 +182647,12 @@ func (c *SecurityPoliciesPatchRuleCall) Do(opts ...googleapi.CallOption) (*Opera // "required": true, // "type": "string" // }, + // "updateMask": { + // "description": "Indicates fields to be cleared as part of this request.", + // "format": "google-fieldmask", + // "location": "query", + // "type": "string" + // }, // "validateOnly": { // "description": "If true, the request will not be committed.", // "location": "query", @@ -176637,22 +183013,21 @@ func (r *ServiceAttachmentsService) AggregatedList(project string) *ServiceAttac // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. 
To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -176667,7 +183042,8 @@ func (r *ServiceAttachmentsService) AggregatedList(project string) *ServiceAttac // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *ServiceAttachmentsAggregatedListCall) Filter(filter string) *ServiceAttachmentsAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -176728,6 +183104,13 @@ func (c *ServiceAttachmentsAggregatedListCall) ReturnPartialSuccess(returnPartia return c } +// ServiceProjectNumber sets the optional parameter +// "serviceProjectNumber": +func (c *ServiceAttachmentsAggregatedListCall) ServiceProjectNumber(serviceProjectNumber int64) *ServiceAttachmentsAggregatedListCall { + c.urlParams_.Set("serviceProjectNumber", fmt.Sprint(serviceProjectNumber)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -176836,7 +183219,7 @@ func (c *ServiceAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -176874,6 +183257,11 @@ func (c *ServiceAttachmentsAggregatedListCall) Do(opts ...googleapi.CallOption) // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", // "location": "query", // "type": "boolean" + // }, + // "serviceProjectNumber": { + // "format": "int64", + // "location": "query", + // "type": "string" // } // }, // "path": "projects/{project}/aggregated/serviceAttachments", @@ -177653,22 +184041,21 @@ func (r *ServiceAttachmentsService) List(project string, region string) *Service // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -177683,7 +184070,8 @@ func (r *ServiceAttachmentsService) List(project string, region string) *Service // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. 
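A minimal sketch (not part of the vendored diff) of how a caller might combine the documented filter syntax with the new serviceProjectNumber parameter on the aggregated service-attachments list; the project ID, filter value, and project number are hypothetical, and credentials are assumed to come from Application Default Credentials.

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // uses Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}
	// AIP-160 style filter; per the description above it must not be mixed
	// with the eq/ne regular-expression style in the same request.
	resp, err := svc.ServiceAttachments.
		AggregatedList("my-project").
		Filter(`name != example-instance`).
		ServiceProjectNumber(123456789012). // new optional parameter
		Do()
	if err != nil {
		log.Fatal(err)
	}
	for scope, list := range resp.Items {
		fmt.Printf("%s: %d service attachments\n", scope, len(list.ServiceAttachments))
	}
}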
func (c *ServiceAttachmentsListCall) Filter(filter string) *ServiceAttachmentsListCall { c.urlParams_.Set("filter", filter) return c @@ -177841,7 +184229,7 @@ func (c *ServiceAttachmentsListCall) Do(opts ...googleapi.CallOption) (*ServiceA // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -178445,30 +184833,172 @@ func (c *ServiceAttachmentsTestIamPermissionsCall) Do(opts ...googleapi.CallOpti } -// method id "compute.snapshots.delete": +// method id "compute.snapshotSettings.get": -type SnapshotsDeleteCall struct { - s *Service - project string - snapshot string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type SnapshotSettingsGetCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified Snapshot resource. Keep in mind that -// deleting a single snapshot might not necessarily delete all the data -// on that snapshot. If any data on the snapshot that is marked for -// deletion is needed for subsequent snapshots, the data will be moved -// to the next corresponding snapshot. For more information, see -// Deleting snapshots. +// Get: Get snapshot settings. // // - project: Project ID for this request. -// - snapshot: Name of the Snapshot resource to delete. -func (r *SnapshotsService) Delete(project string, snapshot string) *SnapshotsDeleteCall { - c := &SnapshotsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *SnapshotSettingsService) Get(project string) *SnapshotSettingsGetCall { + c := &SnapshotSettingsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.snapshot = snapshot + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *SnapshotSettingsGetCall) Fields(s ...googleapi.Field) *SnapshotSettingsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
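As an illustrative aside (not part of the generated code), this is roughly how the new compute.snapshotSettings.get call and the IfNoneMatch/ETag pattern described above could be used; the project ID is a placeholder and the ETag is taken from the previous response's headers.

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	settings, err := svc.SnapshotSettings.Get("my-project").Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("snapshot settings: %+v\n", settings)

	// Conditional re-fetch: if the resource is unchanged the server replies
	// 304 Not Modified, which Do surfaces as an error recognized by
	// googleapi.IsNotModified.
	_, err = svc.SnapshotSettings.Get("my-project").
		IfNoneMatch(settings.ServerResponse.Header.Get("Etag")).
		Do()
	if googleapi.IsNotModified(err) {
		fmt.Println("snapshot settings unchanged since last fetch")
	}
}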
+func (c *SnapshotSettingsGetCall) IfNoneMatch(entityTag string) *SnapshotSettingsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SnapshotSettingsGetCall) Context(ctx context.Context) *SnapshotSettingsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *SnapshotSettingsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *SnapshotSettingsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/snapshotSettings") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.snapshotSettings.get" call. +// Exactly one of *SnapshotSettings or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *SnapshotSettings.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *SnapshotSettingsGetCall) Do(opts ...googleapi.CallOption) (*SnapshotSettings, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &SnapshotSettings{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Get snapshot settings.", + // "flatPath": "projects/{project}/global/snapshotSettings", + // "httpMethod": "GET", + // "id": "compute.snapshotSettings.get", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/snapshotSettings", + // "response": { + // "$ref": "SnapshotSettings" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.snapshotSettings.patch": + +type SnapshotSettingsPatchCall struct { + s *Service + project string + snapshotsettings *SnapshotSettings + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Patch snapshot settings. +// +// - project: Project ID for this request. +func (r *SnapshotSettingsService) Patch(project string, snapshotsettings *SnapshotSettings) *SnapshotSettingsPatchCall { + c := &SnapshotSettingsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.snapshotsettings = snapshotsettings return c } @@ -178483,15 +185013,22 @@ func (r *SnapshotsService) Delete(project string, snapshot string) *SnapshotsDel // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *SnapshotsDeleteCall) RequestId(requestId string) *SnapshotsDeleteCall { +func (c *SnapshotSettingsPatchCall) RequestId(requestId string) *SnapshotSettingsPatchCall { c.urlParams_.Set("requestId", requestId) return c } +// UpdateMask sets the optional parameter "updateMask": update_mask +// indicates fields to be updated as part of this request. +func (c *SnapshotSettingsPatchCall) UpdateMask(updateMask string) *SnapshotSettingsPatchCall { + c.urlParams_.Set("updateMask", updateMask) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *SnapshotsDeleteCall) Fields(s ...googleapi.Field) *SnapshotsDeleteCall { +func (c *SnapshotSettingsPatchCall) Fields(s ...googleapi.Field) *SnapshotSettingsPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -178499,21 +185036,21 @@ func (c *SnapshotsDeleteCall) Fields(s ...googleapi.Field) *SnapshotsDeleteCall // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *SnapshotsDeleteCall) Context(ctx context.Context) *SnapshotsDeleteCall { +func (c *SnapshotSettingsPatchCall) Context(ctx context.Context) *SnapshotSettingsPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *SnapshotsDeleteCall) Header() http.Header { +func (c *SnapshotSettingsPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *SnapshotsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *SnapshotSettingsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -178521,30 +185058,206 @@ func (c *SnapshotsDeleteCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.snapshotsettings) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/snapshots/{snapshot}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/snapshotSettings") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "snapshot": c.snapshot, + "project": c.project, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.snapshots.delete" call. +// Do executes the "compute.snapshotSettings.patch" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *SnapshotsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *SnapshotSettingsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Patch snapshot settings.", + // "flatPath": "projects/{project}/global/snapshotSettings", + // "httpMethod": "PATCH", + // "id": "compute.snapshotSettings.patch", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "updateMask": { + // "description": "update_mask indicates fields to be updated as part of this request.", + // "format": "google-fieldmask", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/snapshotSettings", + // "request": { + // "$ref": "SnapshotSettings" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.snapshots.delete": + +type SnapshotsDeleteCall struct { + s *Service + project string + snapshot string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified Snapshot resource. Keep in mind that +// deleting a single snapshot might not necessarily delete all the data +// on that snapshot. If any data on the snapshot that is marked for +// deletion is needed for subsequent snapshots, the data will be moved +// to the next corresponding snapshot. For more information, see +// Deleting snapshots. +// +// - project: Project ID for this request. +// - snapshot: Name of the Snapshot resource to delete. +func (r *SnapshotsService) Delete(project string, snapshot string) *SnapshotsDeleteCall { + c := &SnapshotsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.snapshot = snapshot + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. 
Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *SnapshotsDeleteCall) RequestId(requestId string) *SnapshotsDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *SnapshotsDeleteCall) Fields(s ...googleapi.Field) *SnapshotsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SnapshotsDeleteCall) Context(ctx context.Context) *SnapshotsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *SnapshotsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *SnapshotsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/snapshots/{snapshot}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "snapshot": c.snapshot, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.snapshots.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *SnapshotsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -179143,22 +185856,21 @@ func (r *SnapshotsService) List(project string) *SnapshotsListCall { // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. 
If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -179173,7 +185885,8 @@ func (r *SnapshotsService) List(project string) *SnapshotsListCall { // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *SnapshotsListCall) Filter(filter string) *SnapshotsListCall { c.urlParams_.Set("filter", filter) return c @@ -179329,7 +186042,7 @@ func (c *SnapshotsListCall) Do(opts ...googleapi.CallOption) (*SnapshotList, err // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. 
The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -179892,22 +186605,21 @@ func (r *SslCertificatesService) AggregatedList(project string) *SslCertificates // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -179922,7 +186634,8 @@ func (r *SslCertificatesService) AggregatedList(project string) *SslCertificates // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. 
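A small, assumed usage sketch of the eq/ne regular-expression filter style documented above, applied to the snapshots list call; the project ID and pattern are placeholders, and, as the description notes, this style cannot be mixed with AIP-160 filters or combined across multiple fields.

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// `ne` takes an RE2 pattern that must match the entire field value;
	// constraints on multiple fields cannot be combined in this style.
	list, err := svc.Snapshots.List("my-project").
		Filter("name ne .*instance").
		Do()
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range list.Items {
		fmt.Println(s.Name)
	}
}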
func (c *SslCertificatesAggregatedListCall) Filter(filter string) *SslCertificatesAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -179983,6 +186696,13 @@ func (c *SslCertificatesAggregatedListCall) ReturnPartialSuccess(returnPartialSu return c } +// ServiceProjectNumber sets the optional parameter +// "serviceProjectNumber": +func (c *SslCertificatesAggregatedListCall) ServiceProjectNumber(serviceProjectNumber int64) *SslCertificatesAggregatedListCall { + c.urlParams_.Set("serviceProjectNumber", fmt.Sprint(serviceProjectNumber)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -180091,7 +186811,7 @@ func (c *SslCertificatesAggregatedListCall) Do(opts ...googleapi.CallOption) (*S // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. 
If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -180129,6 +186849,11 @@ func (c *SslCertificatesAggregatedListCall) Do(opts ...googleapi.CallOption) (*S // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" + // }, + // "serviceProjectNumber": { + // "format": "int64", + // "location": "query", + // "type": "string" // } // }, // "path": "projects/{project}/aggregated/sslCertificates", @@ -180681,22 +187406,21 @@ func (r *SslCertificatesService) List(project string) *SslCertificatesListCall { // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. 
The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -180711,7 +187435,8 @@ func (r *SslCertificatesService) List(project string) *SslCertificatesListCall { // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *SslCertificatesListCall) Filter(filter string) *SslCertificatesListCall { c.urlParams_.Set("filter", filter) return c @@ -180867,7 +187592,7 @@ func (c *SslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCertific // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -180961,22 +187686,21 @@ func (r *SslPoliciesService) AggregatedList(project string) *SslPoliciesAggregat // filters resources listed in the response. 
Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -180991,7 +187715,8 @@ func (r *SslPoliciesService) AggregatedList(project string) *SslPoliciesAggregat // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *SslPoliciesAggregatedListCall) Filter(filter string) *SslPoliciesAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -181052,6 +187777,13 @@ func (c *SslPoliciesAggregatedListCall) ReturnPartialSuccess(returnPartialSucces return c } +// ServiceProjectNumber sets the optional parameter +// "serviceProjectNumber": +func (c *SslPoliciesAggregatedListCall) ServiceProjectNumber(serviceProjectNumber int64) *SslPoliciesAggregatedListCall { + c.urlParams_.Set("serviceProjectNumber", fmt.Sprint(serviceProjectNumber)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
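For context, a hedged sketch of the updateMask query parameter introduced in the hunks above, shown here on compute.securityPolicies.patch; the project, policy name, description, and mask value are hypothetical and only illustrate scoping a patch to a single field.

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// The updateMask query parameter scopes the patch to the named fields;
	// see its description in the hunks above.
	op, err := svc.SecurityPolicies.
		Patch("my-project", "my-policy", &compute.SecurityPolicy{
			Description: "updated via updateMask",
		}).
		UpdateMask("description").
		Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("operation:", op.Name)
}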
@@ -181160,7 +187892,7 @@ func (c *SslPoliciesAggregatedListCall) Do(opts ...googleapi.CallOption) (*SslPo // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. 
You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -181198,6 +187930,11 @@ func (c *SslPoliciesAggregatedListCall) Do(opts ...googleapi.CallOption) (*SslPo // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" + // }, + // "serviceProjectNumber": { + // "format": "int64", + // "location": "query", + // "type": "string" // } // }, // "path": "projects/{project}/aggregated/sslPolicies", @@ -181752,22 +188489,21 @@ func (r *SslPoliciesService) List(project string) *SslPoliciesListCall { // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. 
The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -181782,7 +188518,8 @@ func (r *SslPoliciesService) List(project string) *SslPoliciesListCall { // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *SslPoliciesListCall) Filter(filter string) *SslPoliciesListCall { c.urlParams_.Set("filter", filter) return c @@ -181938,7 +188675,7 @@ func (c *SslPoliciesListCall) Do(opts ...googleapi.CallOption) (*SslPoliciesList // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -182032,22 +188769,21 @@ func (r *SslPoliciesService) ListAvailableFeatures(project string) *SslPoliciesL // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. 
The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -182062,7 +188798,8 @@ func (r *SslPoliciesService) ListAvailableFeatures(project string) *SslPoliciesL // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *SslPoliciesListAvailableFeaturesCall) Filter(filter string) *SslPoliciesListAvailableFeaturesCall { c.urlParams_.Set("filter", filter) return c @@ -182220,7 +188957,7 @@ func (c *SslPoliciesListAvailableFeaturesCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. 
You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -182469,22 +189206,21 @@ func (r *SubnetworksService) AggregatedList(project string) *SubnetworksAggregat // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -182499,7 +189235,8 @@ func (r *SubnetworksService) AggregatedList(project string) *SubnetworksAggregat // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. 
func (c *SubnetworksAggregatedListCall) Filter(filter string) *SubnetworksAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -182560,6 +189297,13 @@ func (c *SubnetworksAggregatedListCall) ReturnPartialSuccess(returnPartialSucces return c } +// ServiceProjectNumber sets the optional parameter +// "serviceProjectNumber": +func (c *SubnetworksAggregatedListCall) ServiceProjectNumber(serviceProjectNumber int64) *SubnetworksAggregatedListCall { + c.urlParams_.Set("serviceProjectNumber", fmt.Sprint(serviceProjectNumber)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -182668,7 +189412,7 @@ func (c *SubnetworksAggregatedListCall) Do(opts ...googleapi.CallOption) (*Subne // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. 
If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -182706,6 +189450,11 @@ func (c *SubnetworksAggregatedListCall) Do(opts ...googleapi.CallOption) (*Subne // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" + // }, + // "serviceProjectNumber": { + // "format": "int64", + // "location": "query", + // "type": "string" // } // }, // "path": "projects/{project}/aggregated/subnetworks", @@ -183672,22 +190421,21 @@ func (r *SubnetworksService) List(project string, region string) *SubnetworksLis // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. 
The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -183702,7 +190450,8 @@ func (r *SubnetworksService) List(project string, region string) *SubnetworksLis // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *SubnetworksListCall) Filter(filter string) *SubnetworksListCall { c.urlParams_.Set("filter", filter) return c @@ -183860,7 +190609,7 @@ func (c *SubnetworksListCall) Do(opts ...googleapi.CallOption) (*SubnetworkList, // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -183961,22 +190710,21 @@ func (r *SubnetworksService) ListUsable(project string) *SubnetworksListUsableCa // filters resources listed in the response. 
Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -183991,7 +190739,8 @@ func (r *SubnetworksService) ListUsable(project string) *SubnetworksListUsableCa // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *SubnetworksListUsableCall) Filter(filter string) *SubnetworksListUsableCall { c.urlParams_.Set("filter", filter) return c @@ -184147,7 +190896,7 @@ func (c *SubnetworksListUsableCall) Do(opts ...googleapi.CallOption) (*UsableSub // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -185471,22 +192220,21 @@ func (r *TargetGrpcProxiesService) List(project string) *TargetGrpcProxiesListCa // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. 
For example: ``` @@ -185501,7 +192249,8 @@ func (r *TargetGrpcProxiesService) List(project string) *TargetGrpcProxiesListCa // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *TargetGrpcProxiesListCall) Filter(filter string) *TargetGrpcProxiesListCall { c.urlParams_.Set("filter", filter) return c @@ -185657,7 +192406,7 @@ func (c *TargetGrpcProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetGrp // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -185929,22 +192678,21 @@ func (r *TargetHttpProxiesService) AggregatedList(project string) *TargetHttpPro // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. 
For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -185959,7 +192707,8 @@ func (r *TargetHttpProxiesService) AggregatedList(project string) *TargetHttpPro // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *TargetHttpProxiesAggregatedListCall) Filter(filter string) *TargetHttpProxiesAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -186020,6 +192769,13 @@ func (c *TargetHttpProxiesAggregatedListCall) ReturnPartialSuccess(returnPartial return c } +// ServiceProjectNumber sets the optional parameter +// "serviceProjectNumber": +func (c *TargetHttpProxiesAggregatedListCall) ServiceProjectNumber(serviceProjectNumber int64) *TargetHttpProxiesAggregatedListCall { + c.urlParams_.Set("serviceProjectNumber", fmt.Sprint(serviceProjectNumber)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -186128,7 +192884,7 @@ func (c *TargetHttpProxiesAggregatedListCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -186166,6 +192922,11 @@ func (c *TargetHttpProxiesAggregatedListCall) Do(opts ...googleapi.CallOption) ( // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", // "location": "query", // "type": "boolean" + // }, + // "serviceProjectNumber": { + // "format": "int64", + // "location": "query", + // "type": "string" // } // }, // "path": "projects/{project}/aggregated/targetHttpProxies", @@ -186718,22 +193479,21 @@ func (r *TargetHttpProxiesService) List(project string) *TargetHttpProxiesListCa // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -186748,7 +193508,8 @@ func (r *TargetHttpProxiesService) List(project string) *TargetHttpProxiesListCa // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. 
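As a usage sketch only (not part of this vendored diff), the snippet below exercises the filter grammar documented in the hunk above through the aggregated-list call it modifies. It assumes this file is the google.golang.org/api/compute/v1 client, that Application Default Credentials are available, and a hypothetical project ID; only setters declared in this generated file are used, and the aggregated-list Items field comes from elsewhere in the same file.

package main

// Illustrative sketch; identifiers marked "hypothetical" are not from the patch.

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1" // assumed import path for this vendored client
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // uses Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}

	// AIP-160 style filter as documented above: parenthesized expressions are
	// ANDed implicitly, `labels.owner:*` tests that a label key is set, and the
	// legacy regex form (eq/ne) must not be mixed with this style in one request.
	filter := `name != example-instance`

	agg, err := svc.TargetHttpProxies.AggregatedList("my-project"). // hypothetical project ID
		Filter(filter).
		ReturnPartialSuccess(true).
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("scopes returned:", len(agg.Items)) // Items is the per-scope map defined elsewhere in the generated file
}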
func (c *TargetHttpProxiesListCall) Filter(filter string) *TargetHttpProxiesListCall { c.urlParams_.Set("filter", filter) return c @@ -186904,7 +193665,7 @@ func (c *TargetHttpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHtt // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -187352,22 +194113,21 @@ func (r *TargetHttpsProxiesService) AggregatedList(project string) *TargetHttpsP // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. 
You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -187382,7 +194142,8 @@ func (r *TargetHttpsProxiesService) AggregatedList(project string) *TargetHttpsP // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *TargetHttpsProxiesAggregatedListCall) Filter(filter string) *TargetHttpsProxiesAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -187443,6 +194204,13 @@ func (c *TargetHttpsProxiesAggregatedListCall) ReturnPartialSuccess(returnPartia return c } +// ServiceProjectNumber sets the optional parameter +// "serviceProjectNumber": +func (c *TargetHttpsProxiesAggregatedListCall) ServiceProjectNumber(serviceProjectNumber int64) *TargetHttpsProxiesAggregatedListCall { + c.urlParams_.Set("serviceProjectNumber", fmt.Sprint(serviceProjectNumber)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -187551,7 +194319,7 @@ func (c *TargetHttpsProxiesAggregatedListCall) Do(opts ...googleapi.CallOption) // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -187589,6 +194357,11 @@ func (c *TargetHttpsProxiesAggregatedListCall) Do(opts ...googleapi.CallOption) // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", // "location": "query", // "type": "boolean" + // }, + // "serviceProjectNumber": { + // "format": "int64", + // "location": "query", + // "type": "string" // } // }, // "path": "projects/{project}/aggregated/targetHttpsProxies", @@ -188141,22 +194914,21 @@ func (r *TargetHttpsProxiesService) List(project string) *TargetHttpsProxiesList // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -188171,7 +194943,8 @@ func (r *TargetHttpsProxiesService) List(project string) *TargetHttpsProxiesList // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. 
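The hunks above also add a serviceProjectNumber query parameter to the aggregated-list calls without documenting its meaning. A minimal sketch of how the new setter is chained, assuming a configured *compute.Service named svc and hypothetical identifiers (assumed imports: context, google.golang.org/api/compute/v1):

// Sketch only: the diff adds the setter but gives the parameter no description,
// so this shows the wiring, not the semantics.
func aggregatedListForServiceProject(ctx context.Context, svc *compute.Service) error {
	_, err := svc.TargetHttpsProxies.AggregatedList("my-host-project"). // hypothetical project ID
		ServiceProjectNumber(123456789012).                         // hypothetical project number; sent as ?serviceProjectNumber=
		Context(ctx).
		Do()
	return err
}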
func (c *TargetHttpsProxiesListCall) Filter(filter string) *TargetHttpsProxiesListCall { c.urlParams_.Set("filter", filter) return c @@ -188327,7 +195100,7 @@ func (c *TargetHttpsProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHt // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -189486,22 +196259,21 @@ func (r *TargetInstancesService) AggregatedList(project string) *TargetInstances // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. 
You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -189516,7 +196288,8 @@ func (r *TargetInstancesService) AggregatedList(project string) *TargetInstances // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *TargetInstancesAggregatedListCall) Filter(filter string) *TargetInstancesAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -189577,6 +196350,13 @@ func (c *TargetInstancesAggregatedListCall) ReturnPartialSuccess(returnPartialSu return c } +// ServiceProjectNumber sets the optional parameter +// "serviceProjectNumber": +func (c *TargetInstancesAggregatedListCall) ServiceProjectNumber(serviceProjectNumber int64) *TargetInstancesAggregatedListCall { + c.urlParams_.Set("serviceProjectNumber", fmt.Sprint(serviceProjectNumber)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -189685,7 +196465,7 @@ func (c *TargetInstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*T // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -189723,6 +196503,11 @@ func (c *TargetInstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*T // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", // "location": "query", // "type": "boolean" + // }, + // "serviceProjectNumber": { + // "format": "int64", + // "location": "query", + // "type": "string" // } // }, // "path": "projects/{project}/aggregated/targetInstances", @@ -190242,13 +197027,489 @@ func (c *TargetInstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation } return ret, nil // { - // "description": "Creates a TargetInstance resource in the specified project and zone using the data included in the request.", - // "flatPath": "projects/{project}/zones/{zone}/targetInstances", + // "description": "Creates a TargetInstance resource in the specified project and zone using the data included in the request.", + // "flatPath": "projects/{project}/zones/{zone}/targetInstances", + // "httpMethod": "POST", + // "id": "compute.targetInstances.insert", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "Name of the zone scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/zones/{zone}/targetInstances", + // "request": { + // "$ref": "TargetInstance" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.targetInstances.list": + +type TargetInstancesListCall struct { + s *Service + project string + zone string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a list of TargetInstance resources available to the +// specified project and zone. +// +// - project: Project ID for this request. +// - zone: Name of the zone scoping this request. +func (r *TargetInstancesService) List(project string, zone string) *TargetInstancesListCall { + c := &TargetInstancesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. 
These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. +func (c *TargetInstancesListCall) Filter(filter string) *TargetInstancesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *TargetInstancesListCall) MaxResults(maxResults int64) *TargetInstancesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. 
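A small sketch of the zonal list call whose Filter, MaxResults and OrderBy setters are declared in this hunk, assuming a configured *compute.Service and hypothetical project/zone names; TargetInstanceList.Items and TargetInstance.Name come from elsewhere in the generated file (assumed imports: context, fmt, google.golang.org/api/compute/v1):

func listTargetInstances(ctx context.Context, svc *compute.Service) error {
	list, err := svc.TargetInstances.List("my-project", "us-central1-a").
		Filter(`name != example-instance`). // AIP-160 filter, per the doc comment above
		MaxResults(100).                    // page size; acceptable values are 0-500
		OrderBy("creationTimestamp desc").  // newest first; only name / creationTimestamp desc are supported
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	for _, ti := range list.Items { // Items is defined on TargetInstanceList elsewhere in this file
		fmt.Println(ti.Name)
	}
	return nil
}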
+func (c *TargetInstancesListCall) OrderBy(orderBy string) *TargetInstancesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *TargetInstancesListCall) PageToken(pageToken string) *TargetInstancesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *TargetInstancesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *TargetInstancesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetInstancesListCall) Fields(s ...googleapi.Field) *TargetInstancesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *TargetInstancesListCall) IfNoneMatch(entityTag string) *TargetInstancesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetInstancesListCall) Context(ctx context.Context) *TargetInstancesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TargetInstancesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetInstancesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/targetInstances") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.targetInstances.list" call. +// Exactly one of *TargetInstanceList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *TargetInstanceList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. 
Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *TargetInstancesListCall) Do(opts ...googleapi.CallOption) (*TargetInstanceList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &TargetInstanceList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of TargetInstance resources available to the specified project and zone.", + // "flatPath": "projects/{project}/zones/{zone}/targetInstances", + // "httpMethod": "GET", + // "id": "compute.targetInstances.list", + // "parameterOrder": [ + // "project", + // "zone" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. 
You cannot combine constraints on multiple fields using regular expressions.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" + // }, + // "zone": { + // "description": "Name of the zone scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/zones/{zone}/targetInstances", + // "response": { + // "$ref": "TargetInstanceList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *TargetInstancesListCall) Pages(ctx context.Context, f func(*TargetInstanceList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.targetInstances.setSecurityPolicy": + +type TargetInstancesSetSecurityPolicyCall struct { + s *Service + project string + zone string + targetInstance string + securitypolicyreference *SecurityPolicyReference + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetSecurityPolicy: Sets the Google Cloud Armor security policy for +// the specified target instance. 
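The Pages helper above drives pageToken-based iteration, resets the token when it returns, and stops early if the callback returns an error. A sketch under the same assumptions as the previous example:

func walkTargetInstances(ctx context.Context, svc *compute.Service) error {
	call := svc.TargetInstances.List("my-project", "us-central1-a").MaxResults(50) // hypothetical project/zone
	return call.Pages(ctx, func(page *compute.TargetInstanceList) error {
		for _, ti := range page.Items { // Items defined elsewhere in the generated file
			fmt.Println(ti.Name)
		}
		return nil // a non-nil error here halts the iteration
	})
}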
For more information, see Google Cloud +// Armor Overview +// +// - project: Project ID for this request. +// - targetInstance: Name of the TargetInstance resource to which the +// security policy should be set. The name should conform to RFC1035. +// - zone: Name of the zone scoping this request. +func (r *TargetInstancesService) SetSecurityPolicy(project string, zone string, targetInstance string, securitypolicyreference *SecurityPolicyReference) *TargetInstancesSetSecurityPolicyCall { + c := &TargetInstancesSetSecurityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.targetInstance = targetInstance + c.securitypolicyreference = securitypolicyreference + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *TargetInstancesSetSecurityPolicyCall) RequestId(requestId string) *TargetInstancesSetSecurityPolicyCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetInstancesSetSecurityPolicyCall) Fields(s ...googleapi.Field) *TargetInstancesSetSecurityPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetInstancesSetSecurityPolicyCall) Context(ctx context.Context) *TargetInstancesSetSecurityPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TargetInstancesSetSecurityPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetInstancesSetSecurityPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicyreference) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/targetInstances/{targetInstance}/setSecurityPolicy") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "targetInstance": c.targetInstance, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.targetInstances.setSecurityPolicy" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetInstancesSetSecurityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the Google Cloud Armor security policy for the specified target instance. For more information, see Google Cloud Armor Overview", + // "flatPath": "projects/{project}/zones/{zone}/targetInstances/{targetInstance}/setSecurityPolicy", // "httpMethod": "POST", - // "id": "compute.targetInstances.insert", + // "id": "compute.targetInstances.setSecurityPolicy", // "parameterOrder": [ // "project", - // "zone" + // "zone", + // "targetInstance" // ], // "parameters": { // "project": { @@ -190263,279 +197524,12 @@ func (c *TargetInstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation // "location": "query", // "type": "string" // }, - // "zone": { - // "description": "Name of the zone scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "projects/{project}/zones/{zone}/targetInstances", - // "request": { - // "$ref": "TargetInstance" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.targetInstances.list": - -type TargetInstancesListCall struct { - s *Service - project string - zone string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// List: Retrieves a list of TargetInstance resources available to the -// specified project and zone. -// -// - project: Project ID for this request. -// - zone: Name of the zone scoping this request. 
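// Illustrative caller-side sketch for the compute.targetInstances.setSecurityPolicy
// call added above (not part of the generated client). The service value, the
// project/zone/instance names, and the SecurityPolicy field on
// SecurityPolicyReference are placeholders/assumptions for this example only.
//
//	svc, err := compute.NewService(ctx)
//	if err != nil {
//		// handle error
//	}
//	op, err := svc.TargetInstances.SetSecurityPolicy("my-project", "us-central1-a",
//		"my-target-instance", &compute.SecurityPolicyReference{
//			SecurityPolicy: "projects/my-project/global/securityPolicies/my-policy",
//		}).Context(ctx).Do()
//	if err != nil {
//		// handle error
//	}
//	_ = op // the returned Operation can be polled for completion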
-func (r *TargetInstancesService) List(project string, zone string) *TargetInstancesListCall { - c := &TargetInstancesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.zone = zone - return c -} - -// Filter sets the optional parameter "filter": A filter expression that -// filters resources listed in the response. Most Compute resources -// support two types of filter expressions: expressions that support -// regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a -// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` -// or `:`. For example, if you are filtering Compute Engine instances, -// you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` -// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") -// ``` By default, each expression is an `AND` expression. However, you -// can include `AND` and `OR` expressions explicitly. For example: ``` -// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") -// AND (scheduling.automaticRestart = true) ``` If you want to use a -// regular expression, use the `eq` (equal) or `ne` (not equal) operator -// against a single un-parenthesized expression with or without quotes -// or against multiple parenthesized expressions. Examples: `fieldname -// eq unquoted literal` `fieldname eq 'single quoted literal'` -// `fieldname eq "double quoted literal" `(fieldname1 eq literal) -// (fieldname2 ne "literal")` The literal value is interpreted as a -// regular expression using Google RE2 library syntax. The literal value -// must match the entire field. For example, to filter for instances -// that do not end with name "instance", you would use `name ne -// .*instance`. -func (c *TargetInstancesListCall) Filter(filter string) *TargetInstancesListCall { - c.urlParams_.Set("filter", filter) - return c -} - -// MaxResults sets the optional parameter "maxResults": The maximum -// number of results per page that should be returned. If the number of -// available results is larger than `maxResults`, Compute Engine returns -// a `nextPageToken` that can be used to get the next page of results in -// subsequent list requests. Acceptable values are `0` to `500`, -// inclusive. (Default: `500`) -func (c *TargetInstancesListCall) MaxResults(maxResults int64) *TargetInstancesListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// OrderBy sets the optional parameter "orderBy": Sorts list results by -// a certain order. By default, results are returned in alphanumerical -// order based on the resource name. 
You can also sort results in -// descending order based on the creation timestamp using -// `orderBy="creationTimestamp desc". This sorts results based on the -// `creationTimestamp` field in reverse chronological order (newest -// result first). Use this to sort resources like operations so that the -// newest operation is returned first. Currently, only sorting by `name` -// or `creationTimestamp desc` is supported. -func (c *TargetInstancesListCall) OrderBy(orderBy string) *TargetInstancesListCall { - c.urlParams_.Set("orderBy", orderBy) - return c -} - -// PageToken sets the optional parameter "pageToken": Specifies a page -// token to use. Set `pageToken` to the `nextPageToken` returned by a -// previous list request to get the next page of results. -func (c *TargetInstancesListCall) PageToken(pageToken string) *TargetInstancesListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// ReturnPartialSuccess sets the optional parameter -// "returnPartialSuccess": Opt-in for partial success behavior which -// provides partial results in case of failure. The default value is -// false. -func (c *TargetInstancesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *TargetInstancesListCall { - c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *TargetInstancesListCall) Fields(s ...googleapi.Field) *TargetInstancesListCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *TargetInstancesListCall) IfNoneMatch(entityTag string) *TargetInstancesListCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *TargetInstancesListCall) Context(ctx context.Context) *TargetInstancesListCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *TargetInstancesListCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *TargetInstancesListCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/targetInstances") - urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "zone": c.zone, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.targetInstances.list" call. -// Exactly one of *TargetInstanceList or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *TargetInstanceList.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *TargetInstancesListCall) Do(opts ...googleapi.CallOption) (*TargetInstanceList, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &TargetInstanceList{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Retrieves a list of TargetInstance resources available to the specified project and zone.", - // "flatPath": "projects/{project}/zones/{zone}/targetInstances", - // "httpMethod": "GET", - // "id": "compute.targetInstances.list", - // "parameterOrder": [ - // "project", - // "zone" - // ], - // "parameters": { - // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "500", - // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "orderBy": { - // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", - // "location": "query", - // "type": "string" - // }, - // "pageToken": { - // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", + // "targetInstance": { + // "description": "Name of the TargetInstance resource to which the security policy should be set. The name should conform to RFC1035.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" // }, - // "returnPartialSuccess": { - // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", - // "location": "query", - // "type": "boolean" - // }, // "zone": { // "description": "Name of the zone scoping this request.", // "location": "path", @@ -190544,40 +197538,21 @@ func (c *TargetInstancesListCall) Do(opts ...googleapi.CallOption) (*TargetInsta // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/targetInstances", + // "path": "projects/{project}/zones/{zone}/targetInstances/{targetInstance}/setSecurityPolicy", + // "request": { + // "$ref": "SecurityPolicyReference" + // }, // "response": { - // "$ref": "TargetInstanceList" + // "$ref": "Operation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" + // "https://www.googleapis.com/auth/compute" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *TargetInstancesListCall) Pages(ctx context.Context, f func(*TargetInstanceList) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - // method id "compute.targetPools.addHealthCheck": type TargetPoolsAddHealthCheckCall struct { @@ -190978,22 +197953,21 @@ func (r *TargetPoolsService) AggregatedList(project string) *TargetPoolsAggregat // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. 
For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -191008,7 +197982,8 @@ func (r *TargetPoolsService) AggregatedList(project string) *TargetPoolsAggregat // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *TargetPoolsAggregatedListCall) Filter(filter string) *TargetPoolsAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -191069,6 +198044,13 @@ func (c *TargetPoolsAggregatedListCall) ReturnPartialSuccess(returnPartialSucces return c } +// ServiceProjectNumber sets the optional parameter +// "serviceProjectNumber": +func (c *TargetPoolsAggregatedListCall) ServiceProjectNumber(serviceProjectNumber int64) *TargetPoolsAggregatedListCall { + c.urlParams_.Set("serviceProjectNumber", fmt.Sprint(serviceProjectNumber)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -191177,7 +198159,7 @@ func (c *TargetPoolsAggregatedListCall) Do(opts ...googleapi.CallOption) (*Targe // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -191215,6 +198197,11 @@ func (c *TargetPoolsAggregatedListCall) Do(opts ...googleapi.CallOption) (*Targe // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", // "location": "query", // "type": "boolean" + // }, + // "serviceProjectNumber": { + // "format": "int64", + // "location": "query", + // "type": "string" // } // }, // "path": "projects/{project}/aggregated/targetPools", @@ -191976,22 +198963,21 @@ func (r *TargetPoolsService) List(project string, region string) *TargetPoolsLis // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -192006,7 +198992,8 @@ func (r *TargetPoolsService) List(project string, region string) *TargetPoolsLis // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *TargetPoolsListCall) Filter(filter string) *TargetPoolsListCall { c.urlParams_.Set("filter", filter) return c @@ -192164,7 +199151,7 @@ func (c *TargetPoolsListCall) Do(opts ...googleapi.CallOption) (*TargetPoolList, // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. 
Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -192376,205 +199363,407 @@ func (c *TargetPoolsRemoveHealthCheckCall) Do(opts ...googleapi.CallOption) (*Op } return ret, nil // { - // "description": "Removes health check URL from a target pool.", - // "flatPath": "projects/{project}/regions/{region}/targetPools/{targetPool}/removeHealthCheck", + // "description": "Removes health check URL from a target pool.", + // "flatPath": "projects/{project}/regions/{region}/targetPools/{targetPool}/removeHealthCheck", + // "httpMethod": "POST", + // "id": "compute.targetPools.removeHealthCheck", + // "parameterOrder": [ + // "project", + // "region", + // "targetPool" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "targetPool": { + // "description": "Name of the target pool to remove health checks from.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/targetPools/{targetPool}/removeHealthCheck", + // "request": { + // "$ref": "TargetPoolsRemoveHealthCheckRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.targetPools.removeInstance": + +type TargetPoolsRemoveInstanceCall struct { + s *Service + project string + region string + targetPool string + targetpoolsremoveinstancerequest *TargetPoolsRemoveInstanceRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// RemoveInstance: Removes instance URL from a target pool. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - targetPool: Name of the TargetPool resource to remove instances +// from. +func (r *TargetPoolsService) RemoveInstance(project string, region string, targetPool string, targetpoolsremoveinstancerequest *TargetPoolsRemoveInstanceRequest) *TargetPoolsRemoveInstanceCall { + c := &TargetPoolsRemoveInstanceCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.targetPool = targetPool + c.targetpoolsremoveinstancerequest = targetpoolsremoveinstancerequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *TargetPoolsRemoveInstanceCall) RequestId(requestId string) *TargetPoolsRemoveInstanceCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *TargetPoolsRemoveInstanceCall) Fields(s ...googleapi.Field) *TargetPoolsRemoveInstanceCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetPoolsRemoveInstanceCall) Context(ctx context.Context) *TargetPoolsRemoveInstanceCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
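// Illustrative caller-side sketch for compute.targetPools.removeInstance (not part
// of the generated client). The service value, the resource names, and the
// Instances/Instance fields on the request type are assumptions for this example.
//
//	op, err := svc.TargetPools.RemoveInstance("my-project", "us-central1", "my-pool",
//		&compute.TargetPoolsRemoveInstanceRequest{
//			Instances: []*compute.InstanceReference{
//				{Instance: "zones/us-central1-a/instances/vm-1"},
//			},
//		}).Context(ctx).Do()
//	if err != nil {
//		// handle error
//	}
//	_ = op // poll the returned Operation to wait for completion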
+func (c *TargetPoolsRemoveInstanceCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetPoolsRemoveInstanceCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetpoolsremoveinstancerequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/targetPools/{targetPool}/removeInstance") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "targetPool": c.targetPool, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.targetPools.removeInstance" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetPoolsRemoveInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Removes instance URL from a target pool.", + // "flatPath": "projects/{project}/regions/{region}/targetPools/{targetPool}/removeInstance", + // "httpMethod": "POST", + // "id": "compute.targetPools.removeInstance", + // "parameterOrder": [ + // "project", + // "region", + // "targetPool" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. 
For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "targetPool": { + // "description": "Name of the TargetPool resource to remove instances from.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/targetPools/{targetPool}/removeInstance", + // "request": { + // "$ref": "TargetPoolsRemoveInstanceRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.targetPools.setBackup": + +type TargetPoolsSetBackupCall struct { + s *Service + project string + region string + targetPool string + targetreference *TargetReference + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetBackup: Changes a backup target pool's configurations. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +// - targetPool: Name of the TargetPool resource to set a backup pool +// for. +func (r *TargetPoolsService) SetBackup(project string, region string, targetPool string, targetreference *TargetReference) *TargetPoolsSetBackupCall { + c := &TargetPoolsSetBackupCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.targetPool = targetPool + c.targetreference = targetreference + return c +} + +// FailoverRatio sets the optional parameter "failoverRatio": New +// failoverRatio value for the target pool. +func (c *TargetPoolsSetBackupCall) FailoverRatio(failoverRatio float64) *TargetPoolsSetBackupCall { + c.urlParams_.Set("failoverRatio", fmt.Sprint(failoverRatio)) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *TargetPoolsSetBackupCall) RequestId(requestId string) *TargetPoolsSetBackupCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
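// Illustrative caller-side sketch for compute.targetPools.setBackup (not part of
// the generated client). The service value, the resource names, and the Target
// field on TargetReference are assumptions for this example.
//
//	op, err := svc.TargetPools.SetBackup("my-project", "us-central1", "my-primary-pool",
//		&compute.TargetReference{
//			Target: "projects/my-project/regions/us-central1/targetPools/my-backup-pool",
//		}).FailoverRatio(0.1).Context(ctx).Do()
//	if err != nil {
//		// handle error
//	}
//	_ = op // poll the returned Operation to wait for completion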
+func (c *TargetPoolsSetBackupCall) Fields(s ...googleapi.Field) *TargetPoolsSetBackupCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *TargetPoolsSetBackupCall) Context(ctx context.Context) *TargetPoolsSetBackupCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *TargetPoolsSetBackupCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *TargetPoolsSetBackupCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetreference) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/targetPools/{targetPool}/setBackup") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "targetPool": c.targetPool, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.targetPools.setBackup" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *TargetPoolsSetBackupCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Changes a backup target pool's configurations.", + // "flatPath": "projects/{project}/regions/{region}/targetPools/{targetPool}/setBackup", // "httpMethod": "POST", - // "id": "compute.targetPools.removeHealthCheck", + // "id": "compute.targetPools.setBackup", // "parameterOrder": [ // "project", // "region", // "targetPool" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region for this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "failoverRatio": { + // "description": "New failoverRatio value for the target pool.", + // "format": "float", // "location": "query", - // "type": "string" + // "type": "number" // }, - // "targetPool": { - // "description": "Name of the target pool to remove health checks from.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", - // "required": true, - // "type": "string" - // } - // }, - // "path": "projects/{project}/regions/{region}/targetPools/{targetPool}/removeHealthCheck", - // "request": { - // "$ref": "TargetPoolsRemoveHealthCheckRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - -// method id "compute.targetPools.removeInstance": - -type TargetPoolsRemoveInstanceCall struct { - s *Service - project string - region string - targetPool string - targetpoolsremoveinstancerequest *TargetPoolsRemoveInstanceRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// RemoveInstance: Removes instance URL from a target pool. -// -// - project: Project ID for this request. -// - region: Name of the region scoping this request. 
-// - targetPool: Name of the TargetPool resource to remove instances -// from. -func (r *TargetPoolsService) RemoveInstance(project string, region string, targetPool string, targetpoolsremoveinstancerequest *TargetPoolsRemoveInstanceRequest) *TargetPoolsRemoveInstanceCall { - c := &TargetPoolsRemoveInstanceCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.targetPool = targetPool - c.targetpoolsremoveinstancerequest = targetpoolsremoveinstancerequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *TargetPoolsRemoveInstanceCall) RequestId(requestId string) *TargetPoolsRemoveInstanceCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *TargetPoolsRemoveInstanceCall) Fields(s ...googleapi.Field) *TargetPoolsRemoveInstanceCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *TargetPoolsRemoveInstanceCall) Context(ctx context.Context) *TargetPoolsRemoveInstanceCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *TargetPoolsRemoveInstanceCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *TargetPoolsRemoveInstanceCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetpoolsremoveinstancerequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/targetPools/{targetPool}/removeInstance") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "targetPool": c.targetPool, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.targetPools.removeInstance" call. -// Exactly one of *Operation or error will be non-nil. 
Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *TargetPoolsRemoveInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Removes instance URL from a target pool.", - // "flatPath": "projects/{project}/regions/{region}/targetPools/{targetPool}/removeInstance", - // "httpMethod": "POST", - // "id": "compute.targetPools.removeInstance", - // "parameterOrder": [ - // "project", - // "region", - // "targetPool" - // ], - // "parameters": { // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -192595,16 +199784,16 @@ func (c *TargetPoolsRemoveInstanceCall) Do(opts ...googleapi.CallOption) (*Opera // "type": "string" // }, // "targetPool": { - // "description": "Name of the TargetPool resource to remove instances from.", + // "description": "Name of the TargetPool resource to set a backup pool for.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/targetPools/{targetPool}/removeInstance", + // "path": "projects/{project}/regions/{region}/targetPools/{targetPool}/setBackup", // "request": { - // "$ref": "TargetPoolsRemoveInstanceRequest" + // "$ref": "TargetReference" // }, // "response": { // "$ref": "Operation" @@ -192617,38 +199806,33 @@ func (c *TargetPoolsRemoveInstanceCall) Do(opts ...googleapi.CallOption) (*Opera } -// method id "compute.targetPools.setBackup": +// method id "compute.targetPools.setSecurityPolicy": -type TargetPoolsSetBackupCall struct { - s *Service - project string - region string - targetPool string - targetreference *TargetReference - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type TargetPoolsSetSecurityPolicyCall struct { + s *Service + project string + region string + targetPool string + securitypolicyreference *SecurityPolicyReference + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetBackup: Changes a backup target pool's configurations. +// SetSecurityPolicy: Sets the Google Cloud Armor security policy for +// the specified target pool. For more information, see Google Cloud +// Armor Overview // // - project: Project ID for this request. // - region: Name of the region scoping this request. -// - targetPool: Name of the TargetPool resource to set a backup pool -// for. 
-func (r *TargetPoolsService) SetBackup(project string, region string, targetPool string, targetreference *TargetReference) *TargetPoolsSetBackupCall { - c := &TargetPoolsSetBackupCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - targetPool: Name of the TargetPool resource to which the security +// policy should be set. The name should conform to RFC1035. +func (r *TargetPoolsService) SetSecurityPolicy(project string, region string, targetPool string, securitypolicyreference *SecurityPolicyReference) *TargetPoolsSetSecurityPolicyCall { + c := &TargetPoolsSetSecurityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region c.targetPool = targetPool - c.targetreference = targetreference - return c -} - -// FailoverRatio sets the optional parameter "failoverRatio": New -// failoverRatio value for the target pool. -func (c *TargetPoolsSetBackupCall) FailoverRatio(failoverRatio float64) *TargetPoolsSetBackupCall { - c.urlParams_.Set("failoverRatio", fmt.Sprint(failoverRatio)) + c.securitypolicyreference = securitypolicyreference return c } @@ -192663,7 +199847,7 @@ func (c *TargetPoolsSetBackupCall) FailoverRatio(failoverRatio float64) *TargetP // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *TargetPoolsSetBackupCall) RequestId(requestId string) *TargetPoolsSetBackupCall { +func (c *TargetPoolsSetSecurityPolicyCall) RequestId(requestId string) *TargetPoolsSetSecurityPolicyCall { c.urlParams_.Set("requestId", requestId) return c } @@ -192671,7 +199855,7 @@ func (c *TargetPoolsSetBackupCall) RequestId(requestId string) *TargetPoolsSetBa // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *TargetPoolsSetBackupCall) Fields(s ...googleapi.Field) *TargetPoolsSetBackupCall { +func (c *TargetPoolsSetSecurityPolicyCall) Fields(s ...googleapi.Field) *TargetPoolsSetSecurityPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -192679,21 +199863,21 @@ func (c *TargetPoolsSetBackupCall) Fields(s ...googleapi.Field) *TargetPoolsSetB // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *TargetPoolsSetBackupCall) Context(ctx context.Context) *TargetPoolsSetBackupCall { +func (c *TargetPoolsSetSecurityPolicyCall) Context(ctx context.Context) *TargetPoolsSetSecurityPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *TargetPoolsSetBackupCall) Header() http.Header { +func (c *TargetPoolsSetSecurityPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *TargetPoolsSetBackupCall) doRequest(alt string) (*http.Response, error) { +func (c *TargetPoolsSetSecurityPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -192701,14 +199885,14 @@ func (c *TargetPoolsSetBackupCall) doRequest(alt string) (*http.Response, error) } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetreference) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.securitypolicyreference) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/targetPools/{targetPool}/setBackup") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/targetPools/{targetPool}/setSecurityPolicy") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -192723,14 +199907,14 @@ func (c *TargetPoolsSetBackupCall) doRequest(alt string) (*http.Response, error) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.targetPools.setBackup" call. +// Do executes the "compute.targetPools.setSecurityPolicy" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *TargetPoolsSetBackupCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *TargetPoolsSetSecurityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -192761,22 +199945,16 @@ func (c *TargetPoolsSetBackupCall) Do(opts ...googleapi.CallOption) (*Operation, } return ret, nil // { - // "description": "Changes a backup target pool's configurations.", - // "flatPath": "projects/{project}/regions/{region}/targetPools/{targetPool}/setBackup", + // "description": "Sets the Google Cloud Armor security policy for the specified target pool. 
For more information, see Google Cloud Armor Overview", + // "flatPath": "projects/{project}/regions/{region}/targetPools/{targetPool}/setSecurityPolicy", // "httpMethod": "POST", - // "id": "compute.targetPools.setBackup", + // "id": "compute.targetPools.setSecurityPolicy", // "parameterOrder": [ // "project", // "region", // "targetPool" // ], // "parameters": { - // "failoverRatio": { - // "description": "New failoverRatio value for the target pool.", - // "format": "float", - // "location": "query", - // "type": "number" - // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -192797,16 +199975,15 @@ func (c *TargetPoolsSetBackupCall) Do(opts ...googleapi.CallOption) (*Operation, // "type": "string" // }, // "targetPool": { - // "description": "Name of the TargetPool resource to set a backup pool for.", + // "description": "Name of the TargetPool resource to which the security policy should be set. The name should conform to RFC1035.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/targetPools/{targetPool}/setBackup", + // "path": "projects/{project}/regions/{region}/targetPools/{targetPool}/setSecurityPolicy", // "request": { - // "$ref": "TargetReference" + // "$ref": "SecurityPolicyReference" // }, // "response": { // "$ref": "Operation" @@ -193335,22 +200512,21 @@ func (r *TargetSslProxiesService) List(project string) *TargetSslProxiesListCall // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. 
For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -193365,7 +200541,8 @@ func (r *TargetSslProxiesService) List(project string) *TargetSslProxiesListCall // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *TargetSslProxiesListCall) Filter(filter string) *TargetSslProxiesListCall { c.urlParams_.Set("filter", filter) return c @@ -193521,7 +200698,7 @@ func (c *TargetSslProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetSslP // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -194503,22 +201680,21 @@ func (r *TargetTcpProxiesService) AggregatedList(project string) *TargetTcpProxi // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. 
The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -194533,7 +201709,8 @@ func (r *TargetTcpProxiesService) AggregatedList(project string) *TargetTcpProxi // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *TargetTcpProxiesAggregatedListCall) Filter(filter string) *TargetTcpProxiesAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -194594,6 +201771,13 @@ func (c *TargetTcpProxiesAggregatedListCall) ReturnPartialSuccess(returnPartialS return c } +// ServiceProjectNumber sets the optional parameter +// "serviceProjectNumber": +func (c *TargetTcpProxiesAggregatedListCall) ServiceProjectNumber(serviceProjectNumber int64) *TargetTcpProxiesAggregatedListCall { + c.urlParams_.Set("serviceProjectNumber", fmt.Sprint(serviceProjectNumber)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -194702,7 +201886,7 @@ func (c *TargetTcpProxiesAggregatedListCall) Do(opts ...googleapi.CallOption) (* // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. 
For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -194740,6 +201924,11 @@ func (c *TargetTcpProxiesAggregatedListCall) Do(opts ...googleapi.CallOption) (* // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" + // }, + // "serviceProjectNumber": { + // "format": "int64", + // "location": "query", + // "type": "string" // } // }, // "path": "projects/{project}/aggregated/targetTcpProxies", @@ -195292,22 +202481,21 @@ func (r *TargetTcpProxiesService) List(project string) *TargetTcpProxiesListCall // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. 
For example: ``` @@ -195322,7 +202510,8 @@ func (r *TargetTcpProxiesService) List(project string) *TargetTcpProxiesListCall // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *TargetTcpProxiesListCall) Filter(filter string) *TargetTcpProxiesListCall { c.urlParams_.Set("filter", filter) return c @@ -195478,7 +202667,7 @@ func (c *TargetTcpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetTcpP // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -195925,22 +203114,21 @@ func (r *TargetVpnGatewaysService) AggregatedList(project string) *TargetVpnGate // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. 
For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -195955,7 +203143,8 @@ func (r *TargetVpnGatewaysService) AggregatedList(project string) *TargetVpnGate // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *TargetVpnGatewaysAggregatedListCall) Filter(filter string) *TargetVpnGatewaysAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -196016,6 +203205,13 @@ func (c *TargetVpnGatewaysAggregatedListCall) ReturnPartialSuccess(returnPartial return c } +// ServiceProjectNumber sets the optional parameter +// "serviceProjectNumber": +func (c *TargetVpnGatewaysAggregatedListCall) ServiceProjectNumber(serviceProjectNumber int64) *TargetVpnGatewaysAggregatedListCall { + c.urlParams_.Set("serviceProjectNumber", fmt.Sprint(serviceProjectNumber)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -196124,7 +203320,7 @@ func (c *TargetVpnGatewaysAggregatedListCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -196162,6 +203358,11 @@ func (c *TargetVpnGatewaysAggregatedListCall) Do(opts ...googleapi.CallOption) ( // "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", // "location": "query", // "type": "boolean" + // }, + // "serviceProjectNumber": { + // "format": "int64", + // "location": "query", + // "type": "string" // } // }, // "path": "projects/{project}/aggregated/targetVpnGateways", @@ -196753,22 +203954,21 @@ func (r *TargetVpnGatewaysService) List(project string, region string) *TargetVp // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -196783,7 +203983,8 @@ func (r *TargetVpnGatewaysService) List(project string, region string) *TargetVp // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. 
func (c *TargetVpnGatewaysListCall) Filter(filter string) *TargetVpnGatewaysListCall { c.urlParams_.Set("filter", filter) return c @@ -196941,7 +204142,7 @@ func (c *TargetVpnGatewaysListCall) Do(opts ...googleapi.CallOption) (*TargetVpn // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -197231,22 +204432,21 @@ func (r *UrlMapsService) AggregatedList(project string) *UrlMapsAggregatedListCa // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. 
You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -197261,7 +204461,8 @@ func (r *UrlMapsService) AggregatedList(project string) *UrlMapsAggregatedListCa // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *UrlMapsAggregatedListCall) Filter(filter string) *UrlMapsAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -197322,6 +204523,13 @@ func (c *UrlMapsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bo return c } +// ServiceProjectNumber sets the optional parameter +// "serviceProjectNumber": +func (c *UrlMapsAggregatedListCall) ServiceProjectNumber(serviceProjectNumber int64) *UrlMapsAggregatedListCall { + c.urlParams_.Set("serviceProjectNumber", fmt.Sprint(serviceProjectNumber)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -197430,7 +204638,7 @@ func (c *UrlMapsAggregatedListCall) Do(opts ...googleapi.CallOption) (*UrlMapsAg // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -197468,6 +204676,11 @@ func (c *UrlMapsAggregatedListCall) Do(opts ...googleapi.CallOption) (*UrlMapsAg // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" + // }, + // "serviceProjectNumber": { + // "format": "int64", + // "location": "query", + // "type": "string" // } // }, // "path": "projects/{project}/aggregated/urlMaps", @@ -198199,22 +205412,21 @@ func (r *UrlMapsService) List(project string) *UrlMapsListCall { // filters resources listed in the response. 
Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -198229,7 +205441,8 @@ func (r *UrlMapsService) List(project string) *UrlMapsListCall { // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *UrlMapsListCall) Filter(filter string) *UrlMapsListCall { c.urlParams_.Set("filter", filter) return c @@ -198385,7 +205598,7 @@ func (c *UrlMapsListCall) Do(opts ...googleapi.CallOption) (*UrlMapList, error) // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -198990,22 +206203,21 @@ func (r *VpnGatewaysService) AggregatedList(project string) *VpnGatewaysAggregat // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. 
For example: ``` @@ -199020,7 +206232,8 @@ func (r *VpnGatewaysService) AggregatedList(project string) *VpnGatewaysAggregat // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *VpnGatewaysAggregatedListCall) Filter(filter string) *VpnGatewaysAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -199081,6 +206294,13 @@ func (c *VpnGatewaysAggregatedListCall) ReturnPartialSuccess(returnPartialSucces return c } +// ServiceProjectNumber sets the optional parameter +// "serviceProjectNumber": +func (c *VpnGatewaysAggregatedListCall) ServiceProjectNumber(serviceProjectNumber int64) *VpnGatewaysAggregatedListCall { + c.urlParams_.Set("serviceProjectNumber", fmt.Sprint(serviceProjectNumber)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -199189,7 +206409,7 @@ func (c *VpnGatewaysAggregatedListCall) Do(opts ...googleapi.CallOption) (*VpnGa // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -199227,6 +206447,11 @@ func (c *VpnGatewaysAggregatedListCall) Do(opts ...googleapi.CallOption) (*VpnGa // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" + // }, + // "serviceProjectNumber": { + // "format": "int64", + // "location": "query", + // "type": "string" // } // }, // "path": "projects/{project}/aggregated/vpnGateways", @@ -199990,22 +207215,21 @@ func (r *VpnGatewaysService) List(project string, region string) *VpnGatewaysLis // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. 
These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -200020,7 +207244,8 @@ func (r *VpnGatewaysService) List(project string, region string) *VpnGatewaysLis // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *VpnGatewaysListCall) Filter(filter string) *VpnGatewaysListCall { c.urlParams_.Set("filter", filter) return c @@ -200178,7 +207403,7 @@ func (c *VpnGatewaysListCall) Do(opts ...googleapi.CallOption) (*VpnGatewayList, // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. 
For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. 
For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -200636,22 +207861,21 @@ func (r *VpnTunnelsService) AggregatedList(project string) *VpnTunnelsAggregated // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -200666,7 +207890,8 @@ func (r *VpnTunnelsService) AggregatedList(project string) *VpnTunnelsAggregated // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. 
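Reviewer note: the doc-comment churn above repeats the same two changes for every list call, namely that substring matching with the bare `:` operator is no longer documented and that AIP-160 and regular-expression filters cannot be mixed, while each aggregated-list call also gains the new optional `serviceProjectNumber` query parameter. A minimal, hypothetical usage sketch of the documented filter syntax together with the new setter follows; the project ID, the project number, and the use of default credentials are assumptions for illustration, not taken from this patch.

// Hypothetical sketch: list VPN tunnels across all regions whose name does not
// end in "-test", using the RE2-based `ne` operator documented above, and pass
// the newly added serviceProjectNumber parameter.
package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // uses Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}

	call := svc.VpnTunnels.AggregatedList("my-project"). // placeholder project ID
		Filter(`name ne ".*-test"`).                  // the literal must match the entire field
		ServiceProjectNumber(123456789012).           // optional parameter added in this diff
		ReturnPartialSuccess(true)

	resp, err := call.Do()
	if err != nil {
		log.Fatal(err)
	}
	for scope, scoped := range resp.Items {
		fmt.Println(scope, len(scoped.VpnTunnels))
	}
}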
func (c *VpnTunnelsAggregatedListCall) Filter(filter string) *VpnTunnelsAggregatedListCall { c.urlParams_.Set("filter", filter) return c @@ -200727,6 +207952,13 @@ func (c *VpnTunnelsAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess return c } +// ServiceProjectNumber sets the optional parameter +// "serviceProjectNumber": +func (c *VpnTunnelsAggregatedListCall) ServiceProjectNumber(serviceProjectNumber int64) *VpnTunnelsAggregatedListCall { + c.urlParams_.Set("serviceProjectNumber", fmt.Sprint(serviceProjectNumber)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -200835,7 +208067,7 @@ func (c *VpnTunnelsAggregatedListCall) Do(opts ...googleapi.CallOption) (*VpnTun // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. 
If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -200873,6 +208105,11 @@ func (c *VpnTunnelsAggregatedListCall) Do(opts ...googleapi.CallOption) (*VpnTun // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", // "location": "query", // "type": "boolean" + // }, + // "serviceProjectNumber": { + // "format": "int64", + // "location": "query", + // "type": "string" // } // }, // "path": "projects/{project}/aggregated/vpnTunnels", @@ -201464,22 +208701,21 @@ func (r *VpnTunnelsService) List(project string, region string) *VpnTunnelsListC // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. 
The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -201494,7 +208730,8 @@ func (r *VpnTunnelsService) List(project string, region string) *VpnTunnelsListC // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *VpnTunnelsListCall) Filter(filter string) *VpnTunnelsListCall { c.urlParams_.Set("filter", filter) return c @@ -201652,7 +208889,7 @@ func (c *VpnTunnelsListCall) Do(opts ...googleapi.CallOption) (*VpnTunnelList, e // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. 
For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -202246,22 +209483,21 @@ func (r *ZoneOperationsService) List(project string, zone string) *ZoneOperation // filters resources listed in the response. 
Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -202276,7 +209512,8 @@ func (r *ZoneOperationsService) List(project string, zone string) *ZoneOperation // regular expression using Google RE2 library syntax. The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *ZoneOperationsListCall) Filter(filter string) *ZoneOperationsListCall { c.urlParams_.Set("filter", filter) return c @@ -202434,7 +209671,7 @@ func (c *ZoneOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationLis // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. 
The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. 
For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, @@ -202863,22 +210100,21 @@ func (r *ZonesService) List(project string) *ZonesListCall { // filters resources listed in the response. Most Compute resources // support two types of filter expressions: expressions that support // regular expressions and expressions that follow API improvement -// proposal AIP-160. If you want to use AIP-160, your expression must -// specify the field name, an operator, and the value that you want to -// use for filtering. The value must be a string, a number, or a +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a // boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` // or `:`. For example, if you are filtering Compute Engine instances, // you can exclude instances named `example-instance` by specifying -// `name != example-instance`. The `:` operator can be used with string -// fields to match substrings. For non-string fields it is equivalent to -// the `=` operator. The `:*` comparison can be used to test whether a -// key has been defined. For example, to find all objects with `owner` -// label use: ``` labels.owner:* ``` You can also filter nested fields. -// For example, you could specify `scheduling.automaticRestart = false` -// to include instances only if they are not scheduled for automatic -// restarts. You can use filtering on nested fields to filter based on -// resource labels. To filter on multiple expressions, provide each -// separate expression within parentheses. For example: ``` +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` // (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") // ``` By default, each expression is an `AND` expression. However, you // can include `AND` and `OR` expressions explicitly. For example: ``` @@ -202893,7 +210129,8 @@ func (r *ZonesService) List(project string) *ZonesListCall { // regular expression using Google RE2 library syntax. 
The literal value // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne -// .*instance`. +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. func (c *ZonesListCall) Filter(filter string) *ZonesListCall { c.urlParams_.Set("filter", filter) return c @@ -203049,7 +210286,7 @@ func (c *ZonesListCall) Do(opts ...googleapi.CallOption) (*ZoneList, error) { // ], // "parameters": { // "filter": { - // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. 
For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", // "location": "query", // "type": "string" // }, diff --git a/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go b/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go index 0a6304d51dcc4..5dfff4cb2cc8d 100644 --- a/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go +++ b/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go @@ -8,6 +8,17 @@ // // For product documentation, see: https://cloud.google.com/iam/docs/creating-short-lived-service-account-credentials // +// # Library status +// +// These client libraries are officially supported by Google. However, this +// library is considered complete and is in maintenance mode. This means +// that we will address critical bugs and security issues but will not add +// any new features. +// +// When possible, we recommend using our newer +// [Cloud Client Libraries for Go](https://pkg.go.dev/cloud.google.com/go) +// that are still actively being worked and iterated on. +// // # Creating a client // // Usage example: @@ -17,24 +28,26 @@ // ctx := context.Background() // iamcredentialsService, err := iamcredentials.NewService(ctx) // -// In this example, Google Application Default Credentials are used for authentication. -// -// For information on how to create and obtain Application Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials. +// In this example, Google Application Default Credentials are used for +// authentication. For information on how to create and obtain Application +// Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials. 
// // # Other authentication options // -// To use an API key for authentication (note: some APIs do not support API keys), use option.WithAPIKey: +// To use an API key for authentication (note: some APIs do not support API +// keys), use [google.golang.org/api/option.WithAPIKey]: // // iamcredentialsService, err := iamcredentials.NewService(ctx, option.WithAPIKey("AIza...")) // -// To use an OAuth token (e.g., a user token obtained via a three-legged OAuth flow), use option.WithTokenSource: +// To use an OAuth token (e.g., a user token obtained via a three-legged OAuth +// flow, use [google.golang.org/api/option.WithTokenSource]: // // config := &oauth2.Config{...} // // ... // token, err := config.Exchange(ctx, ...) // iamcredentialsService, err := iamcredentials.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token))) // -// See https://godoc.org/google.golang.org/api/option/ for details on options. +// See [google.golang.org/api/option.ClientOption] for details on options. package iamcredentials // import "google.golang.org/api/iamcredentials/v1" import ( diff --git a/vendor/google.golang.org/api/internal/cba.go b/vendor/google.golang.org/api/internal/cba.go index cecbb9ba115b6..829383f55b502 100644 --- a/vendor/google.golang.org/api/internal/cba.go +++ b/vendor/google.golang.org/api/internal/cba.go @@ -91,16 +91,10 @@ func getTransportConfig(settings *DialSettings) (*transportConfig, error) { s2aMTLSEndpoint: "", } - // Check the env to determine whether to use S2A. - if !isGoogleS2AEnabled() { + if !shouldUseS2A(clientCertSource, settings) { return &defaultTransportConfig, nil } - // If client cert is found, use that over S2A. - // If MTLS is not enabled for the endpoint, skip S2A. - if clientCertSource != nil || !mtlsEndpointEnabledForS2A() { - return &defaultTransportConfig, nil - } s2aMTLSEndpoint := settings.DefaultMTLSEndpoint // If there is endpoint override, honor it. if settings.Endpoint != "" { @@ -118,10 +112,6 @@ func getTransportConfig(settings *DialSettings) (*transportConfig, error) { }, nil } -func isGoogleS2AEnabled() bool { - return strings.ToLower(os.Getenv(googleAPIUseS2AEnv)) == "true" -} - // getClientCertificateSource returns a default client certificate source, if // not provided by the user. // @@ -275,8 +265,36 @@ func GetHTTPTransportConfigAndEndpoint(settings *DialSettings) (cert.Source, fun return nil, dialTLSContextFunc, config.s2aMTLSEndpoint, nil } +func shouldUseS2A(clientCertSource cert.Source, settings *DialSettings) bool { + // If client cert is found, use that over S2A. + if clientCertSource != nil { + return false + } + // If EXPERIMENTAL_GOOGLE_API_USE_S2A is not set to true, skip S2A. + if !isGoogleS2AEnabled() { + return false + } + // If DefaultMTLSEndpoint is not set and no endpoint override, skip S2A. + if settings.DefaultMTLSEndpoint == "" && settings.Endpoint == "" { + return false + } + // If MTLS is not enabled for this endpoint, skip S2A. + if !mtlsEndpointEnabledForS2A() { + return false + } + // If custom HTTP client is provided, skip S2A. + if settings.HTTPClient != nil { + return false + } + return true +} + // mtlsEndpointEnabledForS2A checks if the endpoint is indeed MTLS-enabled, so that we can use S2A for MTLS connection. var mtlsEndpointEnabledForS2A = func() bool { // TODO(xmenxk): determine this via discovery config. 
return true } + +func isGoogleS2AEnabled() bool { + return strings.ToLower(os.Getenv(googleAPIUseS2AEnv)) == "true" +} diff --git a/vendor/google.golang.org/api/internal/creds.go b/vendor/google.golang.org/api/internal/creds.go index 92b3acf6edf23..05165f333b083 100644 --- a/vendor/google.golang.org/api/internal/creds.go +++ b/vendor/google.golang.org/api/internal/creds.go @@ -78,9 +78,8 @@ const ( // met: // // (1) At least one of the following is true: -// (a) No scope is provided -// (b) Scope for self-signed JWT flow is enabled -// (c) Audiences are explicitly provided by users +// (a) Scope for self-signed JWT flow is enabled +// (b) Audiences are explicitly provided by users // (2) No service account impersontation // // - Otherwise, executes standard OAuth 2.0 flow diff --git a/vendor/google.golang.org/api/internal/s2a.go b/vendor/google.golang.org/api/internal/s2a.go index c5b421f5544e8..c70f2419b492a 100644 --- a/vendor/google.golang.org/api/internal/s2a.go +++ b/vendor/google.golang.org/api/internal/s2a.go @@ -13,7 +13,7 @@ import ( "cloud.google.com/go/compute/metadata" ) -const configEndpointSuffix = "googleAutoMtlsConfiguration" +const configEndpointSuffix = "instance/platform-security/auto-mtls-configuration" // The period an MTLS config can be reused before needing refresh. var configExpiry = time.Hour diff --git a/vendor/google.golang.org/api/internal/settings.go b/vendor/google.golang.org/api/internal/settings.go index 3a3874df112ad..84f9302dcfa6e 100644 --- a/vendor/google.golang.org/api/internal/settings.go +++ b/vendor/google.golang.org/api/internal/settings.go @@ -9,6 +9,8 @@ import ( "crypto/tls" "errors" "net/http" + "os" + "strconv" "golang.org/x/oauth2" "golang.org/x/oauth2/google" @@ -16,6 +18,10 @@ import ( "google.golang.org/grpc" ) +const ( + newAuthLibEnVar = "GOOGLE_API_GO_EXPERIMENTAL_USE_NEW_AUTH_LIB" +) + // DialSettings holds information needed to establish a connection with a // Google API service. type DialSettings struct { @@ -47,6 +53,7 @@ type DialSettings struct { ImpersonationConfig *impersonate.Config EnableDirectPath bool EnableDirectPathXds bool + EnableNewAuthLibrary bool AllowNonDefaultServiceAccount bool // Google API system parameters. For more information please read: @@ -77,6 +84,16 @@ func (ds *DialSettings) HasCustomAudience() bool { return len(ds.Audiences) > 0 } +func (ds *DialSettings) IsNewAuthLibraryEnabled() bool { + if ds.EnableNewAuthLibrary { + return true + } + if b, err := strconv.ParseBool(os.Getenv(newAuthLibEnVar)); err == nil { + return b + } + return false +} + // Validate reports an error if ds is invalid. func (ds *DialSettings) Validate() error { if ds.SkipValidation { diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go index 7a7266777bb93..62a5a2386b732 100644 --- a/vendor/google.golang.org/api/internal/version.go +++ b/vendor/google.golang.org/api/internal/version.go @@ -5,4 +5,4 @@ package internal // Version is the current tagged release of the library. 
-const Version = "0.132.0" +const Version = "0.150.0" diff --git a/vendor/google.golang.org/api/option/internaloption/internaloption.go b/vendor/google.golang.org/api/option/internaloption/internaloption.go index 3b8461d1da97a..b2b249eec687d 100644 --- a/vendor/google.golang.org/api/option/internaloption/internaloption.go +++ b/vendor/google.golang.org/api/option/internaloption/internaloption.go @@ -150,6 +150,19 @@ func (w *withCreds) Apply(o *internal.DialSettings) { o.InternalCredentials = (*google.Credentials)(w) } +// EnableNewAuthLibrary returns a ClientOption that specifies if libraries in this +// module to delegate auth to our new library. This option will be removed in +// the future once all clients have been moved to the new auth layer. +func EnableNewAuthLibrary() option.ClientOption { + return enableNewAuthLibrary(true) +} + +type enableNewAuthLibrary bool + +func (w enableNewAuthLibrary) Apply(o *internal.DialSettings) { + o.EnableNewAuthLibrary = bool(w) +} + // EmbeddableAdapter is a no-op option.ClientOption that allow libraries to // create their own client options by embedding this type into their own // client-specific option wrapper. See example for usage. diff --git a/vendor/google.golang.org/api/storage/v1/storage-api.json b/vendor/google.golang.org/api/storage/v1/storage-api.json index 6212071181857..5481a74cbbd40 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-api.json +++ b/vendor/google.golang.org/api/storage/v1/storage-api.json @@ -26,7 +26,7 @@ "description": "Stores and retrieves potentially large, immutable data objects.", "discoveryVersion": "v1", "documentationLink": "https://developers.google.com/storage/docs/json_api/", - "etag": "\"39353535313838393033333032363632303533\"", + "etag": "\"39383633393336373936373236333033393737\"", "icons": { "x16": "https://www.google.com/images/icons/product/cloud_storage-16.png", "x32": "https://www.google.com/images/icons/product/cloud_storage-32.png" @@ -92,6 +92,242 @@ }, "protocol": "rest", "resources": { + "anywhereCache": { + "methods": { + "disable": { + "description": "Disables an Anywhere Cache instance.", + "httpMethod": "POST", + "id": "storage.anywhereCaches.disable", + "parameterOrder": [ + "bucket", + "anywhereCacheId" + ], + "parameters": { + "anywhereCacheId": { + "description": "The ID of requested Anywhere Cache instance.", + "location": "path", + "required": true, + "type": "string" + }, + "bucket": { + "description": "Name of the partent bucket", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "b/{bucket}/anywhereCaches/{anywhereCacheId}/disable", + "response": { + "$ref": "AnywhereCache" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "get": { + "description": "Returns the metadata of an Anywhere Cache instance.", + "httpMethod": "GET", + "id": "storage.anywhereCaches.get", + "parameterOrder": [ + "bucket", + "anywhereCacheId" + ], + "parameters": { + "anywhereCacheId": { + "description": "The ID of requested Anywhere Cache instance.", + "location": "path", + "required": true, + "type": "string" + }, + "bucket": { + "description": "Name of the partent bucket", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "b/{bucket}/anywhereCaches/{anywhereCacheId}", + "response": { + "$ref": "AnywhereCache" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + 
"https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "insert": { + "description": "Creates an Anywhere Cache instance.", + "httpMethod": "POST", + "id": "storage.anywhereCaches.insert", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of the partent bucket", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "b/{bucket}/anywhereCaches", + "request": { + "$ref": "AnywhereCache" + }, + "response": { + "$ref": "GoogleLongrunningOperation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "list": { + "description": "Returns a list of Anywhere Cache instances of the bucket matching the criteria.", + "httpMethod": "GET", + "id": "storage.anywhereCaches.list", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of the partent bucket", + "location": "path", + "required": true, + "type": "string" + }, + "pageSize": { + "description": "Maximum number of items return in a single page of responses. Maximum 1000.", + "format": "int32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "pageToken": { + "description": "A previously-returned page token representing part of the larger set of results to view.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/anywhereCache", + "response": { + "$ref": "AnywhereCaches" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "pause": { + "description": "Pauses an Anywhere Cache instance.", + "httpMethod": "POST", + "id": "storage.anywhereCaches.pause", + "parameterOrder": [ + "bucket", + "anywhereCacheId" + ], + "parameters": { + "anywhereCacheId": { + "description": "The ID of requested Anywhere Cache instance.", + "location": "path", + "required": true, + "type": "string" + }, + "bucket": { + "description": "Name of the partent bucket", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "b/{bucket}/anywhereCaches/{anywhereCacheId}/pause", + "response": { + "$ref": "AnywhereCache" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "resume": { + "description": "Resumes a paused or disabled Anywhere Cache instance.", + "httpMethod": "POST", + "id": "storage.anywhereCaches.resume", + "parameterOrder": [ + "bucket", + "anywhereCacheId" + ], + "parameters": { + "anywhereCacheId": { + "description": "The ID of requested Anywhere Cache instance.", + "location": "path", + "required": true, + "type": "string" + }, + "bucket": { + "description": "Name of the partent bucket", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "b/{bucket}/anywhereCaches/{anywhereCacheId}/resume", + "response": { + "$ref": "AnywhereCache" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + 
"https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "update": { + "description": "Updates the config(ttl and admissionPolicy) of an Anywhere Cache instance.", + "httpMethod": "PATCH", + "id": "storage.anywhereCaches.update", + "parameterOrder": [ + "bucket", + "anywhereCacheId" + ], + "parameters": { + "anywhereCacheId": { + "description": "The ID of requested Anywhere Cache instance.", + "location": "path", + "required": true, + "type": "string" + }, + "bucket": { + "description": "Name of the partent bucket", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "b/{bucket}/anywhereCaches/{anywhereCacheId}", + "request": { + "$ref": "AnywhereCache" + }, + "response": { + "$ref": "GoogleLongrunningOperation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + } + } + }, "bucketAccessControls": { "methods": { "delete": { @@ -446,6 +682,12 @@ "project" ], "parameters": { + "enableObjectRetention": { + "default": "false", + "description": "When set to true, object retention is enabled for this bucket.", + "location": "query", + "type": "boolean" + }, "predefinedAcl": { "description": "Apply a predefined set of access controls to this bucket.", "enum": [ @@ -1124,16 +1366,311 @@ "type": "string" } }, - "path": "b/{bucket}/defaultObjectAcl/{entity}", - "request": { - "$ref": "ObjectAccessControl" - }, + "path": "b/{bucket}/defaultObjectAcl/{entity}", + "request": { + "$ref": "ObjectAccessControl" + }, + "response": { + "$ref": "ObjectAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + } + } + }, + "managedFolders": { + "methods": { + "delete": { + "description": "Permanently deletes a managed folder.", + "httpMethod": "DELETE", + "id": "storage.managedFolders.delete", + "parameterOrder": [ + "bucket", + "managedFolder" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket containing the managed folder.", + "location": "path", + "required": true, + "type": "string" + }, + "ifMetagenerationMatch": { + "description": "If set, only deletes the managed folder if its metageneration matches this value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationNotMatch": { + "description": "If set, only deletes the managed folder if its metageneration does not match this value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "managedFolder": { + "description": "The managed folder name/path.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "b/{bucket}/managedFolders/{managedFolder}", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "get": { + "description": "Returns metadata of the specified managed folder.", + "httpMethod": "GET", + "id": "storage.managedFolders.get", + "parameterOrder": [ + "bucket", + "managedFolder" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket containing the managed folder.", + "location": "path", + "required": true, + "type": "string" + }, + "ifMetagenerationMatch": { + "description": "Makes the return of the managed folder metadata conditional on whether the 
managed folder's current metageneration matches the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationNotMatch": { + "description": "Makes the return of the managed folder metadata conditional on whether the managed folder's current metageneration does not match the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "managedFolder": { + "description": "The managed folder name/path.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "b/{bucket}/managedFolders/{managedFolder}", + "response": { + "$ref": "ManagedFolder" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "getIamPolicy": { + "description": "Returns an IAM policy for the specified managed folder.", + "httpMethod": "GET", + "id": "storage.managedFolders.getIamPolicy", + "parameterOrder": [ + "bucket", + "managedFolder" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket containing the managed folder.", + "location": "path", + "required": true, + "type": "string" + }, + "managedFolder": { + "description": "The managed folder name/path.", + "location": "path", + "required": true, + "type": "string" + }, + "optionsRequestedPolicyVersion": { + "description": "The IAM policy format version to be returned. If the optionsRequestedPolicyVersion is for an older version that doesn't support part of the requested IAM policy, the request fails.", + "format": "int32", + "location": "query", + "minimum": "1", + "type": "integer" + }, + "userProject": { + "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/managedFolders/{managedFolder}/iam", + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "insert": { + "description": "Creates a new managed folder.", + "httpMethod": "POST", + "id": "storage.managedFolders.insert", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket containing the managed folder.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "b/{bucket}/managedFolders", + "request": { + "$ref": "ManagedFolder" + }, + "response": { + "$ref": "ManagedFolder" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "list": { + "description": "Lists managed folders in the given bucket.", + "httpMethod": "GET", + "id": "storage.managedFolders.list", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket containing the managed folder.", + "location": "path", + "required": true, + "type": "string" + }, + "pageSize": { + "description": "Maximum number of items return in a single page of responses.", + "format": "int32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "pageToken": { + "description": "A previously-returned page token representing part of the larger set of results to view.", + "location": "query", + "type": "string" + }, + "prefix": { + "description": "The managed folder name/path prefix to filter the output list of results.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/managedFolders", + "response": { + "$ref": "ManagedFolders" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "setIamPolicy": { + "description": "Updates an IAM policy for the specified managed folder.", + "httpMethod": "PUT", + "id": "storage.managedFolders.setIamPolicy", + "parameterOrder": [ + "bucket", + "managedFolder" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket containing the managed folder.", + "location": "path", + "required": true, + "type": "string" + }, + "managedFolder": { + "description": "The managed folder name/path.", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/managedFolders/{managedFolder}/iam", + "request": { + "$ref": "Policy" + }, + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "testIamPermissions": { + "description": "Tests a set of permissions on the given managed folder to see which, if any, are held by the caller.", + "httpMethod": "GET", + "id": "storage.managedFolders.testIamPermissions", + "parameterOrder": [ + "bucket", + "managedFolder", + "permissions" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket containing the managed folder.", + "location": "path", + "required": true, + "type": "string" + }, + "managedFolder": { + "description": "The managed folder name/path.", + "location": "path", + "required": true, + "type": "string" + }, + "permissions": { + "description": "Permissions to test.", + "location": "query", + "repeated": true, + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/managedFolders/{managedFolder}/iam/testPermissions", "response": { - "$ref": "ObjectAccessControl" + "$ref": "TestIamPermissionsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control" + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" ] } } @@ -1572,6 +2109,34 @@ }, "objects": { "methods": { + "bulkRestore": { + "description": "Initiates a long-running bulk restore operation on the specified bucket.", + "httpMethod": "POST", + "id": "storage.objects.bulkRestore", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket in which the object resides.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "b/{bucket}/o/bulkRestore", + "request": { + "$ref": "BulkRestoreObjectsRequest" + }, + "response": { + "$ref": "GoogleLongrunningOperation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, "compose": { "description": "Concatenates a list of existing objects into a new object in the same bucket.", "httpMethod": "POST", @@ -1925,6 +2490,11 @@ "location": "query", "type": "string" }, + "softDeleted": { + "description": "If true, only soft-deleted object versions will be listed. The default is false. For more information, see Soft Delete.", + "location": "query", + "type": "boolean" + }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -2136,6 +2706,11 @@ "location": "query", "type": "string" }, + "includeFoldersAsPrefixes": { + "description": "Only applicable if delimiter is set to '/'. 
If true, will also include folders and managed folders (besides objects) in the returned prefixes.", + "location": "query", + "type": "boolean" + }, "includeTrailingDelimiter": { "description": "If true, objects that end in exactly one instance of delimiter will have their metadata included in items in addition to prefixes.", "location": "query", @@ -2177,6 +2752,11 @@ "location": "query", "type": "string" }, + "softDeleted": { + "description": "If true, only soft-deleted object versions will be listed. The default is false. For more information, see Soft Delete.", + "location": "query", + "type": "boolean" + }, "startOffset": { "description": "Filter results to objects whose names are lexicographically equal to or after startOffset. If endOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", "location": "query", @@ -2257,6 +2837,11 @@ "required": true, "type": "string" }, + "overrideUnlockedRetention": { + "description": "Must be true to remove the retention configuration, reduce its unlocked retention period, or change its mode from unlocked to locked.", + "location": "query", + "type": "boolean" + }, "predefinedAcl": { "description": "Apply a predefined set of access controls to this object.", "enum": [ @@ -2309,6 +2894,94 @@ "https://www.googleapis.com/auth/devstorage.full_control" ] }, + "restore": { + "description": "Restores a soft-deleted object.", + "httpMethod": "POST", + "id": "storage.objects.restore", + "parameterOrder": [ + "bucket", + "object" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket in which the object resides.", + "location": "path", + "required": true, + "type": "string" + }, + "copySourceAcl": { + "description": "If true, copies the source object's ACL; otherwise, uses the bucket's default object ACL. The default is false.", + "location": "query", + "type": "boolean" + }, + "generation": { + "description": "Selects a specific revision of this object.", + "format": "int64", + "location": "query", + "required": true, + "type": "string" + }, + "ifGenerationMatch": { + "description": "Makes the operation conditional on whether the object's one live generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifGenerationNotMatch": { + "description": "Makes the operation conditional on whether none of the object's live generations match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationMatch": { + "description": "Makes the operation conditional on whether the object's one live metageneration matches the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationNotMatch": { + "description": "Makes the operation conditional on whether none of the object's live metagenerations match the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "object": { + "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "location": "path", + "required": true, + "type": "string" + }, + "projection": { + "description": "Set of properties to return. 
Defaults to full.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit the owner, acl property." + ], + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/o/{object}/restore", + "request": { + "$ref": "Object" + }, + "response": { + "$ref": "Object" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, "rewrite": { "description": "Rewrites a source object to a destination object. Optionally overrides metadata.", "httpMethod": "POST", @@ -2617,6 +3290,11 @@ "required": true, "type": "string" }, + "overrideUnlockedRetention": { + "description": "Must be true to remove the retention configuration, reduce its unlocked retention period, or change its mode from unlocked to locked.", + "location": "query", + "type": "boolean" + }, "predefinedAcl": { "description": "Apply a predefined set of access controls to this object.", "enum": [ @@ -2688,20 +3366,169 @@ "location": "query", "type": "string" }, - "endOffset": { - "description": "Filter results to objects whose names are lexicographically before endOffset. If startOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", + "endOffset": { + "description": "Filter results to objects whose names are lexicographically before endOffset. If startOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", + "location": "query", + "type": "string" + }, + "includeTrailingDelimiter": { + "description": "If true, objects that end in exactly one instance of delimiter will have their metadata included in items in addition to prefixes.", + "location": "query", + "type": "boolean" + }, + "maxResults": { + "default": "1000", + "description": "Maximum number of items plus prefixes to return in a single page of responses. As duplicate prefixes are omitted, fewer total results may be returned than requested. The service will use this parameter or 1,000 items, whichever is smaller.", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "pageToken": { + "description": "A previously-returned page token representing part of the larger set of results to view.", + "location": "query", + "type": "string" + }, + "prefix": { + "description": "Filter results to objects whose names begin with this prefix.", + "location": "query", + "type": "string" + }, + "projection": { + "description": "Set of properties to return. Defaults to noAcl.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit the owner, acl property." + ], + "location": "query", + "type": "string" + }, + "startOffset": { + "description": "Filter results to objects whose names are lexicographically equal to or after startOffset. If endOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + }, + "versions": { + "description": "If true, lists all versions of an object as distinct results. The default is false. 
For more information, see Object Versioning.", + "location": "query", + "type": "boolean" + } + }, + "path": "b/{bucket}/o/watch", + "request": { + "$ref": "Channel", + "parameterName": "resource" + }, + "response": { + "$ref": "Channel" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ], + "supportsSubscription": true + } + } + }, + "operations": { + "methods": { + "cancel": { + "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed.", + "httpMethod": "POST", + "id": "storage.buckets.operations.cancel", + "parameterOrder": [ + "bucket", + "operationId" + ], + "parameters": { + "bucket": { + "description": "The parent bucket of the operation resource.", + "location": "path", + "required": true, + "type": "string" + }, + "operationId": { + "description": "The ID of the operation resource.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "b/{bucket}/operations/{operationId}/cancel", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "get": { + "description": "Gets the latest state of a long-running operation.", + "httpMethod": "GET", + "id": "storage.buckets.operations.get", + "parameterOrder": [ + "bucket", + "operationId" + ], + "parameters": { + "bucket": { + "description": "The parent bucket of the operation resource.", + "location": "path", + "required": true, + "type": "string" + }, + "operationId": { + "description": "The ID of the operation resource.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "b/{bucket}/operations/{operationId}", + "response": { + "$ref": "GoogleLongrunningOperation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "list": { + "description": "Lists operations that match the specified filter in the request.", + "httpMethod": "GET", + "id": "storage.buckets.operations.list", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket in which to look for operations.", + "location": "path", + "required": true, + "type": "string" + }, + "filter": { + "description": "A filter to narrow down results to a preferred subset. The filtering language is documented in more detail in [AIP-160](https://google.aip.dev/160).", "location": "query", "type": "string" }, - "includeTrailingDelimiter": { - "description": "If true, objects that end in exactly one instance of delimiter will have their metadata included in items in addition to prefixes.", - "location": "query", - "type": "boolean" - }, - "maxResults": { - "default": "1000", - "description": "Maximum number of items plus prefixes to return in a single page of responses. As duplicate prefixes are omitted, fewer total results may be returned than requested. 
The service will use this parameter or 1,000 items, whichever is smaller.", - "format": "uint32", + "pageSize": { + "description": "Maximum number of items to return in a single page of responses. Fewer total results may be returned than requested. The service uses this parameter or 100 items, whichever is smaller.", + "format": "int32", "location": "query", "minimum": "0", "type": "integer" @@ -2710,48 +3537,11 @@ "description": "A previously-returned page token representing part of the larger set of results to view.", "location": "query", "type": "string" - }, - "prefix": { - "description": "Filter results to objects whose names begin with this prefix.", - "location": "query", - "type": "string" - }, - "projection": { - "description": "Set of properties to return. Defaults to noAcl.", - "enum": [ - "full", - "noAcl" - ], - "enumDescriptions": [ - "Include all properties.", - "Omit the owner, acl property." - ], - "location": "query", - "type": "string" - }, - "startOffset": { - "description": "Filter results to objects whose names are lexicographically equal to or after startOffset. If endOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", - "location": "query", - "type": "string" - }, - "userProject": { - "description": "The project to be billed for this request. Required for Requester Pays buckets.", - "location": "query", - "type": "string" - }, - "versions": { - "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.", - "location": "query", - "type": "boolean" } }, - "path": "b/{bucket}/o/watch", - "request": { - "$ref": "Channel", - "parameterName": "resource" - }, + "path": "b/{bucket}/operations", "response": { - "$ref": "Channel" + "$ref": "GoogleLongrunningListOperationsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -2759,8 +3549,7 @@ "https://www.googleapis.com/auth/devstorage.full_control", "https://www.googleapis.com/auth/devstorage.read_only", "https://www.googleapis.com/auth/devstorage.read_write" - ], - "supportsSubscription": true + ] } } }, @@ -3010,9 +3799,87 @@ } } }, - "revision": "20230710", + "revision": "20231028", "rootUrl": "https://storage.googleapis.com/", "schemas": { + "AnywhereCache": { + "description": "An Anywhere Cache instance.", + "id": "AnywhereCache", + "properties": { + "admissionPolicy": { + "description": "The cache-level entry admission policy.", + "type": "string" + }, + "anywhereCacheId": { + "description": "The ID of the Anywhere cache instance.", + "type": "string" + }, + "bucket": { + "description": "The name of the bucket containing this cache instance.", + "type": "string" + }, + "createTime": { + "description": "The creation time of the cache instance in RFC 3339 format.", + "format": "date-time", + "type": "string" + }, + "id": { + "description": "The ID of the resource, including the project number, bucket name and anywhere cache ID.", + "type": "string" + }, + "kind": { + "default": "storage#anywhereCache", + "description": "The kind of item this is. 
For Anywhere Cache, this is always storage#anywhereCache.", + "type": "string" + }, + "pendingUpdate": { + "description": "True if the cache instance has an active Update long-running operation.", + "type": "boolean" + }, + "selfLink": { + "description": "The link to this cache instance.", + "type": "string" + }, + "state": { + "description": "The current state of the cache instance.", + "type": "string" + }, + "ttl": { + "description": "The TTL of all cache entries in whole seconds. e.g., \"7200s\". ", + "format": "google-duration", + "type": "string" + }, + "updateTime": { + "description": "The modification time of the cache instance metadata in RFC 3339 format.", + "format": "date-time", + "type": "string" + } + }, + "type": "object" + }, + "AnywhereCaches": { + "description": "A list of Anywhere Caches.", + "id": "AnywhereCaches", + "properties": { + "items": { + "description": "The list of items.", + "items": { + "$ref": "AnywhereCache" + }, + "type": "array" + }, + "kind": { + "default": "storage#anywhereCaches", + "description": "The kind of item this is. For lists of Anywhere Caches, this is always storage#anywhereCaches.", + "type": "string" + }, + "nextPageToken": { + "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results.", + "type": "string" + } + }, + "type": "object" + }, "Bucket": { "description": "A bucket.", "id": "Bucket", @@ -3036,6 +3903,15 @@ "description": "Whether or not Autoclass is enabled on this bucket", "type": "boolean" }, + "terminalStorageClass": { + "description": "The storage class that objects in the bucket eventually transition to if they are not read for a certain length of time. Valid values are NEARLINE and ARCHIVE.", + "type": "string" + }, + "terminalStorageClassUpdateTime": { + "description": "A date and time in RFC 3339 format representing the time of the most recent update to \"terminalStorageClass\".", + "format": "date-time", + "type": "string" + }, "toggleTime": { "description": "A date and time in RFC 3339 format representing the instant at which \"enabled\" was last toggled.", "format": "date-time", @@ -3319,6 +4195,16 @@ "description": "The name of the bucket.", "type": "string" }, + "objectRetention": { + "description": "The bucket's object retention config.", + "properties": { + "mode": { + "description": "The bucket's object retention mode. Can be Enabled.", + "type": "string" + } + }, + "type": "object" + }, "owner": { "description": "The owner of the bucket. This is always the project team's owner group.", "properties": { @@ -3370,6 +4256,22 @@ "description": "The URI of this bucket.", "type": "string" }, + "softDeletePolicy": { + "description": "The bucket's soft delete policy, which defines the period of time that soft-deleted objects will be retained, and cannot be permanently deleted.", + "properties": { + "effectiveTime": { + "description": "Server-determined value that indicates the time from which the policy, or one with a greater retention, was effective. This value is in RFC 3339 format.", + "format": "date-time", + "type": "string" + }, + "retentionDurationSeconds": { + "description": "The duration in seconds that soft-deleted objects in the bucket will be retained and cannot be permanently deleted.", + "format": "int64", + "type": "string" + } + }, + "type": "object" + }, "storageClass": { "description": "The bucket's default storage class, used whenever no storageClass is specified for a newly-created object. 
This defines how objects in the bucket are stored and determines the SLA and the cost of storage. Values include MULTI_REGIONAL, REGIONAL, STANDARD, NEARLINE, COLDLINE, ARCHIVE, and DURABLE_REDUCED_AVAILABILITY. If this value is not specified when the bucket is created, it will default to STANDARD. For more information, see storage classes.", "type": "string" @@ -3525,6 +4427,38 @@ }, "type": "object" }, + "BulkRestoreObjectsRequest": { + "description": "A bulk restore objects request.", + "id": "BulkRestoreObjectsRequest", + "properties": { + "allowOverwrite": { + "description": "If false (default), the restore will not overwrite live objects with the same name at the destination. This means some deleted objects may be skipped. If true, live objects will be overwritten resulting in a noncurrent object (if versioning is enabled). If versioning is not enabled, overwriting the object will result in a soft-deleted object. In either case, if a noncurrent object already exists with the same name, a live version can be written without issue.", + "type": "boolean" + }, + "copySourceAcl": { + "description": "If true, copies the source object's ACL; otherwise, uses the bucket's default object ACL. The default is false.", + "type": "boolean" + }, + "matchGlobs": { + "description": "Restores only the objects matching any of the specified glob(s). If this parameter is not specified, all objects will be restored within the specified time range.", + "items": { + "type": "string" + }, + "type": "array" + }, + "softDeletedAfterTime": { + "description": "Restores only the objects that were soft-deleted after this time.", + "format": "date-time", + "type": "string" + }, + "softDeletedBeforeTime": { + "description": "Restores only the objects that were soft-deleted before this time.", + "format": "date-time", + "type": "string" + } + }, + "type": "object" + }, "Channel": { "description": "An notification channel used to watch for resource changes.", "id": "Channel", @@ -3656,6 +4590,86 @@ }, "type": "object" }, + "GoogleLongrunningListOperationsResponse": { + "description": "The response message for storage.buckets.operations.list.", + "id": "GoogleLongrunningListOperationsResponse", + "properties": { + "nextPageToken": { + "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results.", + "type": "string" + }, + "operations": { + "description": "A list of operations that matches the specified filter in the request.", + "items": { + "$ref": "GoogleLongrunningOperation" + }, + "type": "array" + } + }, + "type": "object" + }, + "GoogleLongrunningOperation": { + "description": "This resource represents a long-running operation that is the result of a network API call.", + "id": "GoogleLongrunningOperation", + "properties": { + "done": { + "description": "If the value is \"false\", it means the operation is still in progress. If \"true\", the operation is completed, and either \"error\" or \"response\" is available.", + "type": "boolean" + }, + "error": { + "$ref": "GoogleRpcStatus", + "description": "The error result of the operation in case of failure or cancellation." + }, + "metadata": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "description": "Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. 
Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.", + "type": "object" + }, + "name": { + "description": "The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the \"name\" should be a resource name ending with \"operations/{operationId}\".", + "type": "string" + }, + "response": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "description": "The normal response of the operation in case of success. If the original method returns no data on success, such as \"Delete\", the response is google.protobuf.Empty. If the original method is standard Get/Create/Update, the response should be the resource. For other methods, the response should have the type \"XxxResponse\", where \"Xxx\" is the original method name. For example, if the original method name is \"TakeSnapshot()\", the inferred response type is \"TakeSnapshotResponse\".", + "type": "object" + } + }, + "type": "object" + }, + "GoogleRpcStatus": { + "description": "The \"Status\" type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each \"Status\" message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).", + "id": "GoogleRpcStatus", + "properties": { + "code": { + "description": "The status code, which should be an enum value of google.rpc.Code.", + "format": "int32", + "type": "integer" + }, + "details": { + "description": "A list of messages that carry the error details. There is a common set of message types for APIs to use.", + "items": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "A developer-facing error message, which should be in English.", + "type": "string" + } + }, + "type": "object" + }, "HmacKey": { "description": "JSON template to produce a JSON-style HMAC Key resource for Create responses.", "id": "HmacKey", @@ -3749,6 +4763,72 @@ }, "type": "object" }, + "ManagedFolder": { + "description": "A managed folder.", + "id": "ManagedFolder", + "properties": { + "bucket": { + "description": "The name of the bucket containing this managed folder.", + "type": "string" + }, + "createTime": { + "description": "The creation time of the managed folder in RFC 3339 format.", + "format": "date-time", + "type": "string" + }, + "id": { + "description": "The ID of the managed folder, including the bucket name and managed folder name.", + "type": "string" + }, + "kind": { + "default": "storage#managedFolder", + "description": "The kind of item this is. For managed folders, this is always storage#managedFolder.", + "type": "string" + }, + "metageneration": { + "description": "The version of the metadata for this managed folder. Used for preconditions and for detecting changes in metadata.", + "format": "int64", + "type": "string" + }, + "name": { + "description": "The name of the managed folder. 
Required if not specified by URL parameter.", + "type": "string" + }, + "selfLink": { + "description": "The link to this managed folder.", + "type": "string" + }, + "updateTime": { + "description": "The last update time of the managed folder metadata in RFC 3339 format.", + "format": "date-time", + "type": "string" + } + }, + "type": "object" + }, + "ManagedFolders": { + "description": "A list of managed folders.", + "id": "ManagedFolders", + "properties": { + "items": { + "description": "The list of items.", + "items": { + "$ref": "ManagedFolder" + }, + "type": "array" + }, + "kind": { + "default": "storage#managedFolders", + "description": "The kind of item this is. For lists of managed folders, this is always storage#managedFolders.", + "type": "string" + }, + "nextPageToken": { + "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results.", + "type": "string" + } + }, + "type": "object" + }, "Notification": { "description": "A subscription to receive Google PubSub notifications.", "id": "Notification", @@ -3910,6 +4990,11 @@ "format": "int64", "type": "string" }, + "hardDeleteTime": { + "description": "This is the time (in the future) when the soft-deleted object will no longer be restorable. It is equal to the soft delete time plus the current soft delete retention duration of the bucket.", + "format": "date-time", + "type": "string" + }, "id": { "description": "The ID of the object, including the bucket name, object name, and generation number.", "type": "string" @@ -3962,6 +5047,21 @@ }, "type": "object" }, + "retention": { + "description": "A collection of object level retention parameters.", + "properties": { + "mode": { + "description": "The bucket's object retention mode, can only be Unlocked or Locked.", + "type": "string" + }, + "retainUntilTime": { + "description": "A time in RFC 3339 format until which object retention protects this object.", + "format": "date-time", + "type": "string" + } + }, + "type": "object" + }, "retentionExpirationTime": { "description": "A server-determined value that specifies the earliest time that the object's retention period expires. This value is in RFC 3339 format. Note 1: This field is not provided for objects with an active event-based hold, since retention expiration is unknown until the hold is removed. Note 2: This value can be provided even when temporary hold is set (so that the user can reason about policy without having to first unset the temporary hold).", "format": "date-time", @@ -3976,6 +5076,11 @@ "format": "uint64", "type": "string" }, + "softDeleteTime": { + "description": "The time at which the object became soft-deleted in RFC 3339 format.", + "format": "date-time", + "type": "string" + }, "storageClass": { "description": "Storage class of the object.", "type": "string" @@ -3990,7 +5095,7 @@ "type": "string" }, "timeDeleted": { - "description": "The deletion time of the object in RFC 3339 format. Will be returned if and only if this version of the object has been deleted.", + "description": "The time at which the object became noncurrent in RFC 3339 format. 
Will be returned if and only if this version of the object has been deleted.", "format": "date-time", "type": "string" }, @@ -4140,14 +5245,15 @@ "type": "object" }, "Policy": { - "description": "A bucket/object IAM policy.", + "description": "A bucket/object/managedFolder IAM policy.", "id": "Policy", "properties": { "bindings": { "annotations": { "required": [ "storage.buckets.setIamPolicy", - "storage.objects.setIamPolicy" + "storage.objects.setIamPolicy", + "storage.managedFolders.setIamPolicy" ] }, "description": "An association between a role, which comes with a set of permissions, and members who may assume that role.", @@ -4161,7 +5267,8 @@ "annotations": { "required": [ "storage.buckets.setIamPolicy", - "storage.objects.setIamPolicy" + "storage.objects.setIamPolicy", + "storage.managedFolders.setIamPolicy" ] }, "description": "A collection of identifiers for members who may assume the provided role. Recognized identifiers are as follows: \n- allUsers — A special identifier that represents anyone on the internet; with or without a Google account. \n- allAuthenticatedUsers — A special identifier that represents anyone who is authenticated with a Google account or a service account. \n- user:emailid — An email address that represents a specific account. For example, user:alice@gmail.com or user:joe@example.com. \n- serviceAccount:emailid — An email address that represents a service account. For example, serviceAccount:my-other-app@appspot.gserviceaccount.com . \n- group:emailid — An email address that represents a Google group. For example, group:admins@example.com. \n- domain:domain — A Google Apps domain name that represents all the users of that domain. For example, domain:google.com or domain:example.com. \n- projectOwner:projectid — Owners of the given project. For example, projectOwner:my-example-project \n- projectEditor:projectid — Editors of the given project. For example, projectEditor:my-example-project \n- projectViewer:projectid — Viewers of the given project. For example, projectViewer:my-example-project", @@ -4174,7 +5281,8 @@ "annotations": { "required": [ "storage.buckets.setIamPolicy", - "storage.objects.setIamPolicy" + "storage.objects.setIamPolicy", + "storage.managedFolders.setIamPolicy" ] }, "description": "The role to which members belong. Two types of roles are supported: new IAM roles, which grant permissions that do not map directly to those provided by ACLs, and legacy IAM roles, which do map directly to ACL permissions. All roles are of the format roles/storage.specificRole.\nThe new IAM roles are: \n- roles/storage.admin — Full control of Google Cloud Storage resources. \n- roles/storage.objectViewer — Read-Only access to Google Cloud Storage objects. \n- roles/storage.objectCreator — Access to create objects in Google Cloud Storage. \n- roles/storage.objectAdmin — Full control of Google Cloud Storage objects. The legacy IAM roles are: \n- roles/storage.legacyObjectReader — Read-only access to objects without listing. Equivalent to an ACL entry on an object with the READER role. \n- roles/storage.legacyObjectOwner — Read/write access to existing objects without listing. Equivalent to an ACL entry on an object with the OWNER role. \n- roles/storage.legacyBucketReader — Read access to buckets with object listing. Equivalent to an ACL entry on a bucket with the READER role. \n- roles/storage.legacyBucketWriter — Read access to buckets with object listing/creation/deletion. Equivalent to an ACL entry on a bucket with the WRITER role. 
\n- roles/storage.legacyBucketOwner — Read and write access to existing buckets with object listing/creation/deletion. Equivalent to an ACL entry on a bucket with the OWNER role.", @@ -4196,7 +5304,7 @@ "type": "string" }, "resourceId": { - "description": "The ID of the resource to which this policy belongs. Will be of the form projects/_/buckets/bucket for buckets, and projects/_/buckets/bucket/objects/object for objects. A specific generation may be specified by appending #generationNumber to the end of the object name, e.g. projects/_/buckets/my-bucket/objects/data.txt#17. The current generation can be denoted with #0. This field is ignored on input.", + "description": "The ID of the resource to which this policy belongs. Will be of the form projects/_/buckets/bucket for buckets, projects/_/buckets/bucket/objects/object for objects, and projects/_/buckets/bucket/managedFolders/managedFolder. A specific generation may be specified by appending #generationNumber to the end of the object name, e.g. projects/_/buckets/my-bucket/objects/data.txt#17. The current generation can be denoted with #0. This field is ignored on input.", "type": "string" }, "version": { @@ -4258,7 +5366,7 @@ "type": "object" }, "TestIamPermissionsResponse": { - "description": "A storage.(buckets|objects).testIamPermissions response.", + "description": "A storage.(buckets|objects|managedFolders).testIamPermissions response.", "id": "TestIamPermissionsResponse", "properties": { "kind": { @@ -4267,7 +5375,7 @@ "type": "string" }, "permissions": { - "description": "The permissions held by the caller. Permissions are always of the format storage.resource.capability, where resource is one of buckets or objects. The supported permissions are as follows: \n- storage.buckets.delete — Delete bucket. \n- storage.buckets.get — Read bucket metadata. \n- storage.buckets.getIamPolicy — Read bucket IAM policy. \n- storage.buckets.create — Create bucket. \n- storage.buckets.list — List buckets. \n- storage.buckets.setIamPolicy — Update bucket IAM policy. \n- storage.buckets.update — Update bucket metadata. \n- storage.objects.delete — Delete object. \n- storage.objects.get — Read object data and metadata. \n- storage.objects.getIamPolicy — Read object IAM policy. \n- storage.objects.create — Create object. \n- storage.objects.list — List objects. \n- storage.objects.setIamPolicy — Update object IAM policy. \n- storage.objects.update — Update object metadata.", + "description": "The permissions held by the caller. Permissions are always of the format storage.resource.capability, where resource is one of buckets, objects, or managedFolders. The supported permissions are as follows: \n- storage.buckets.delete — Delete bucket. \n- storage.buckets.get — Read bucket metadata. \n- storage.buckets.getIamPolicy — Read bucket IAM policy. \n- storage.buckets.create — Create bucket. \n- storage.buckets.list — List buckets. \n- storage.buckets.setIamPolicy — Update bucket IAM policy. \n- storage.buckets.update — Update bucket metadata. \n- storage.objects.delete — Delete object. \n- storage.objects.get — Read object data and metadata. \n- storage.objects.getIamPolicy — Read object IAM policy. \n- storage.objects.create — Create object. \n- storage.objects.list — List objects. \n- storage.objects.setIamPolicy — Update object IAM policy. \n- storage.objects.update — Update object metadata. \n- storage.managedFolders.delete — Delete managed folder. \n- storage.managedFolders.get — Read managed folder metadata. 
\n- storage.managedFolders.getIamPolicy — Read managed folder IAM policy. \n- storage.managedFolders.create — Create managed folder. \n- storage.managedFolders.list — List managed folders. \n- storage.managedFolders.setIamPolicy — Update managed folder IAM policy.", "items": { "type": "string" }, diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go index 69a6e41e7cc1c..90320fcc0b5c2 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-gen.go +++ b/vendor/google.golang.org/api/storage/v1/storage-gen.go @@ -10,6 +10,17 @@ // // For product documentation, see: https://developers.google.com/storage/docs/json_api/ // +// # Library status +// +// These client libraries are officially supported by Google. However, this +// library is considered complete and is in maintenance mode. This means +// that we will address critical bugs and security issues but will not add +// any new features. +// +// When possible, we recommend using our newer +// [Cloud Client Libraries for Go](https://pkg.go.dev/cloud.google.com/go) +// that are still actively being worked and iterated on. +// // # Creating a client // // Usage example: @@ -19,28 +30,31 @@ // ctx := context.Background() // storageService, err := storage.NewService(ctx) // -// In this example, Google Application Default Credentials are used for authentication. -// -// For information on how to create and obtain Application Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials. +// In this example, Google Application Default Credentials are used for +// authentication. For information on how to create and obtain Application +// Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials. // // # Other authentication options // -// By default, all available scopes (see "Constants") are used to authenticate. To restrict scopes, use option.WithScopes: +// By default, all available scopes (see "Constants") are used to authenticate. +// To restrict scopes, use [google.golang.org/api/option.WithScopes]: // // storageService, err := storage.NewService(ctx, option.WithScopes(storage.DevstorageReadWriteScope)) // -// To use an API key for authentication (note: some APIs do not support API keys), use option.WithAPIKey: +// To use an API key for authentication (note: some APIs do not support API +// keys), use [google.golang.org/api/option.WithAPIKey]: // // storageService, err := storage.NewService(ctx, option.WithAPIKey("AIza...")) // -// To use an OAuth token (e.g., a user token obtained via a three-legged OAuth flow), use option.WithTokenSource: +// To use an OAuth token (e.g., a user token obtained via a three-legged OAuth +// flow, use [google.golang.org/api/option.WithTokenSource]: // // config := &oauth2.Config{...} // // ... // token, err := config.Exchange(ctx, ...) // storageService, err := storage.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token))) // -// See https://godoc.org/google.golang.org/api/option/ for details on options. +// See [google.golang.org/api/option.ClientOption] for details on options. 
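As a quick, runnable sketch of the construction and scope-restriction options described in the package comment above (the read-only scope choice and the logging are illustrative, not taken from the vendored file):

package main

import (
	"context"
	"log"

	"google.golang.org/api/option"
	storage "google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()
	// Application Default Credentials are used unless another auth option is
	// supplied; WithScopes narrows the OAuth scopes requested for them.
	svc, err := storage.NewService(ctx, option.WithScopes(storage.DevstorageReadOnlyScope))
	if err != nil {
		log.Fatalf("storage.NewService: %v", err)
	}
	// The sub-services added in this version (AnywhereCache, ManagedFolders,
	// Operations) hang off the same *storage.Service as the existing Buckets
	// and Objects services.
	_ = svc.AnywhereCache
	_ = svc.ManagedFolders
}

option.WithAPIKey and option.WithTokenSource slot into the same NewService call in place of WithScopes, as the package comment above notes.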
package storage // import "google.golang.org/api/storage/v1" import ( @@ -141,13 +155,16 @@ func New(client *http.Client) (*Service, error) { return nil, errors.New("client is nil") } s := &Service{client: client, BasePath: basePath} + s.AnywhereCache = NewAnywhereCacheService(s) s.BucketAccessControls = NewBucketAccessControlsService(s) s.Buckets = NewBucketsService(s) s.Channels = NewChannelsService(s) s.DefaultObjectAccessControls = NewDefaultObjectAccessControlsService(s) + s.ManagedFolders = NewManagedFoldersService(s) s.Notifications = NewNotificationsService(s) s.ObjectAccessControls = NewObjectAccessControlsService(s) s.Objects = NewObjectsService(s) + s.Operations = NewOperationsService(s) s.Projects = NewProjectsService(s) return s, nil } @@ -157,6 +174,8 @@ type Service struct { BasePath string // API endpoint base URL UserAgent string // optional additional User-Agent fragment + AnywhereCache *AnywhereCacheService + BucketAccessControls *BucketAccessControlsService Buckets *BucketsService @@ -165,12 +184,16 @@ type Service struct { DefaultObjectAccessControls *DefaultObjectAccessControlsService + ManagedFolders *ManagedFoldersService + Notifications *NotificationsService ObjectAccessControls *ObjectAccessControlsService Objects *ObjectsService + Operations *OperationsService + Projects *ProjectsService } @@ -181,6 +204,15 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func NewAnywhereCacheService(s *Service) *AnywhereCacheService { + rs := &AnywhereCacheService{s: s} + return rs +} + +type AnywhereCacheService struct { + s *Service +} + func NewBucketAccessControlsService(s *Service) *BucketAccessControlsService { rs := &BucketAccessControlsService{s: s} return rs @@ -217,6 +249,15 @@ type DefaultObjectAccessControlsService struct { s *Service } +func NewManagedFoldersService(s *Service) *ManagedFoldersService { + rs := &ManagedFoldersService{s: s} + return rs +} + +type ManagedFoldersService struct { + s *Service +} + func NewNotificationsService(s *Service) *NotificationsService { rs := &NotificationsService{s: s} return rs @@ -244,6 +285,15 @@ type ObjectsService struct { s *Service } +func NewOperationsService(s *Service) *OperationsService { + rs := &OperationsService{s: s} + return rs +} + +type OperationsService struct { + s *Service +} + func NewProjectsService(s *Service) *ProjectsService { rs := &ProjectsService{s: s} rs.HmacKeys = NewProjectsHmacKeysService(s) @@ -277,6 +327,115 @@ type ProjectsServiceAccountService struct { s *Service } +// AnywhereCache: An Anywhere Cache instance. +type AnywhereCache struct { + // AdmissionPolicy: The cache-level entry admission policy. + AdmissionPolicy string `json:"admissionPolicy,omitempty"` + + // AnywhereCacheId: The ID of the Anywhere cache instance. + AnywhereCacheId string `json:"anywhereCacheId,omitempty"` + + // Bucket: The name of the bucket containing this cache instance. + Bucket string `json:"bucket,omitempty"` + + // CreateTime: The creation time of the cache instance in RFC 3339 + // format. + CreateTime string `json:"createTime,omitempty"` + + // Id: The ID of the resource, including the project number, bucket name + // and anywhere cache ID. + Id string `json:"id,omitempty"` + + // Kind: The kind of item this is. For Anywhere Cache, this is always + // storage#anywhereCache. + Kind string `json:"kind,omitempty"` + + // PendingUpdate: True if the cache instance has an active Update + // long-running operation. 
+ PendingUpdate bool `json:"pendingUpdate,omitempty"` + + // SelfLink: The link to this cache instance. + SelfLink string `json:"selfLink,omitempty"` + + // State: The current state of the cache instance. + State string `json:"state,omitempty"` + + // Ttl: The TTL of all cache entries in whole seconds. e.g., "7200s". + Ttl string `json:"ttl,omitempty"` + + // UpdateTime: The modification time of the cache instance metadata in + // RFC 3339 format. + UpdateTime string `json:"updateTime,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "AdmissionPolicy") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AdmissionPolicy") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *AnywhereCache) MarshalJSON() ([]byte, error) { + type NoMethod AnywhereCache + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// AnywhereCaches: A list of Anywhere Caches. +type AnywhereCaches struct { + // Items: The list of items. + Items []*AnywhereCache `json:"items,omitempty"` + + // Kind: The kind of item this is. For lists of Anywhere Caches, this is + // always storage#anywhereCaches. + Kind string `json:"kind,omitempty"` + + // NextPageToken: The continuation token, used to page through large + // result sets. Provide this value in a subsequent request to return the + // next page of results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Items") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Items") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AnywhereCaches) MarshalJSON() ([]byte, error) { + type NoMethod AnywhereCaches + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Bucket: A bucket. 
type Bucket struct { // Acl: Access controls on the bucket. @@ -359,6 +518,9 @@ type Bucket struct { // Name: The name of the bucket. Name string `json:"name,omitempty"` + // ObjectRetention: The bucket's object retention config. + ObjectRetention *BucketObjectRetention `json:"objectRetention,omitempty"` + // Owner: The owner of the bucket. This is always the project team's // owner group. Owner *BucketOwner `json:"owner,omitempty"` @@ -389,6 +551,11 @@ type Bucket struct { // SelfLink: The URI of this bucket. SelfLink string `json:"selfLink,omitempty"` + // SoftDeletePolicy: The bucket's soft delete policy, which defines the + // period of time that soft-deleted objects will be retained, and cannot + // be permanently deleted. + SoftDeletePolicy *BucketSoftDeletePolicy `json:"softDeletePolicy,omitempty"` + // StorageClass: The bucket's default storage class, used whenever no // storageClass is specified for a newly-created object. This defines // how objects in the bucket are stored and determines the SLA and the @@ -444,6 +611,16 @@ type BucketAutoclass struct { // Enabled: Whether or not Autoclass is enabled on this bucket Enabled bool `json:"enabled,omitempty"` + // TerminalStorageClass: The storage class that objects in the bucket + // eventually transition to if they are not read for a certain length of + // time. Valid values are NEARLINE and ARCHIVE. + TerminalStorageClass string `json:"terminalStorageClass,omitempty"` + + // TerminalStorageClassUpdateTime: A date and time in RFC 3339 format + // representing the time of the most recent update to + // "terminalStorageClass". + TerminalStorageClassUpdateTime string `json:"terminalStorageClassUpdateTime,omitempty"` + // ToggleTime: A date and time in RFC 3339 format representing the // instant at which "enabled" was last toggled. ToggleTime string `json:"toggleTime,omitempty"` @@ -946,6 +1123,34 @@ func (s *BucketLogging) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// BucketObjectRetention: The bucket's object retention config. +type BucketObjectRetention struct { + // Mode: The bucket's object retention mode. Can be Enabled. + Mode string `json:"mode,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Mode") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Mode") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BucketObjectRetention) MarshalJSON() ([]byte, error) { + type NoMethod BucketObjectRetention + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // BucketOwner: The owner of the bucket. This is always the project // team's owner group. 
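Relating the new SoftDeletePolicy field on Bucket to an actual call, here is a minimal sketch that sets a seven-day soft delete window through the generated Buckets.Patch method; Buckets.Patch is pre-existing generated surface rather than part of this diff, and the bucket name is a placeholder.

// assumes the imports and *storage.Service construction from the earlier sketch
func enableSoftDelete(ctx context.Context, svc *storage.Service) error {
	patch := &storage.Bucket{
		SoftDeletePolicy: &storage.BucketSoftDeletePolicy{
			// Keep soft-deleted objects restorable for 7 days.
			RetentionDurationSeconds: 7 * 24 * 60 * 60,
		},
	}
	updated, err := svc.Buckets.Patch("my-bucket", patch).Context(ctx).Do()
	if err != nil {
		return err
	}
	// EffectiveTime is server-determined and returned in RFC 3339 format.
	log.Printf("soft delete effective since %s", updated.SoftDeletePolicy.EffectiveTime)
	return nil
}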
type BucketOwner struct { @@ -1027,6 +1232,43 @@ func (s *BucketRetentionPolicy) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// BucketSoftDeletePolicy: The bucket's soft delete policy, which +// defines the period of time that soft-deleted objects will be +// retained, and cannot be permanently deleted. +type BucketSoftDeletePolicy struct { + // EffectiveTime: Server-determined value that indicates the time from + // which the policy, or one with a greater retention, was effective. + // This value is in RFC 3339 format. + EffectiveTime string `json:"effectiveTime,omitempty"` + + // RetentionDurationSeconds: The duration in seconds that soft-deleted + // objects in the bucket will be retained and cannot be permanently + // deleted. + RetentionDurationSeconds int64 `json:"retentionDurationSeconds,omitempty,string"` + + // ForceSendFields is a list of field names (e.g. "EffectiveTime") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "EffectiveTime") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BucketSoftDeletePolicy) MarshalJSON() ([]byte, error) { + type NoMethod BucketSoftDeletePolicy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // BucketVersioning: The bucket's versioning configuration. type BucketVersioning struct { // Enabled: While set to true, versioning is fully enabled for this @@ -1282,6 +1524,59 @@ func (s *Buckets) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// BulkRestoreObjectsRequest: A bulk restore objects request. +type BulkRestoreObjectsRequest struct { + // AllowOverwrite: If false (default), the restore will not overwrite + // live objects with the same name at the destination. This means some + // deleted objects may be skipped. If true, live objects will be + // overwritten resulting in a noncurrent object (if versioning is + // enabled). If versioning is not enabled, overwriting the object will + // result in a soft-deleted object. In either case, if a noncurrent + // object already exists with the same name, a live version can be + // written without issue. + AllowOverwrite bool `json:"allowOverwrite,omitempty"` + + // CopySourceAcl: If true, copies the source object's ACL; otherwise, + // uses the bucket's default object ACL. The default is false. + CopySourceAcl bool `json:"copySourceAcl,omitempty"` + + // MatchGlobs: Restores only the objects matching any of the specified + // glob(s). If this parameter is not specified, all objects will be + // restored within the specified time range. + MatchGlobs []string `json:"matchGlobs,omitempty"` + + // SoftDeletedAfterTime: Restores only the objects that were + // soft-deleted after this time. 
+ SoftDeletedAfterTime string `json:"softDeletedAfterTime,omitempty"` + + // SoftDeletedBeforeTime: Restores only the objects that were + // soft-deleted before this time. + SoftDeletedBeforeTime string `json:"softDeletedBeforeTime,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AllowOverwrite") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AllowOverwrite") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *BulkRestoreObjectsRequest) MarshalJSON() ([]byte, error) { + type NoMethod BulkRestoreObjectsRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Channel: An notification channel used to watch for resource changes. type Channel struct { // Address: The address where notifications are delivered for this @@ -1498,6 +1793,150 @@ func (s *Expr) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// GoogleLongrunningListOperationsResponse: The response message for +// storage.buckets.operations.list. +type GoogleLongrunningListOperationsResponse struct { + // NextPageToken: The continuation token, used to page through large + // result sets. Provide this value in a subsequent request to return the + // next page of results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // Operations: A list of operations that matches the specified filter in + // the request. + Operations []*GoogleLongrunningOperation `json:"operations,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "NextPageToken") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NextPageToken") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GoogleLongrunningListOperationsResponse) MarshalJSON() ([]byte, error) { + type NoMethod GoogleLongrunningListOperationsResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleLongrunningOperation: This resource represents a long-running +// operation that is the result of a network API call. +type GoogleLongrunningOperation struct { + // Done: If the value is "false", it means the operation is still in + // progress. If "true", the operation is completed, and either "error" + // or "response" is available. + Done bool `json:"done,omitempty"` + + // Error: The error result of the operation in case of failure or + // cancellation. + Error *GoogleRpcStatus `json:"error,omitempty"` + + // Metadata: Service-specific metadata associated with the operation. It + // typically contains progress information and common metadata such as + // create time. Some services might not provide such metadata. Any + // method that returns a long-running operation should document the + // metadata type, if any. + Metadata googleapi.RawMessage `json:"metadata,omitempty"` + + // Name: The server-assigned name, which is only unique within the same + // service that originally returns it. If you use the default HTTP + // mapping, the "name" should be a resource name ending with + // "operations/{operationId}". + Name string `json:"name,omitempty"` + + // Response: The normal response of the operation in case of success. If + // the original method returns no data on success, such as "Delete", the + // response is google.protobuf.Empty. If the original method is standard + // Get/Create/Update, the response should be the resource. For other + // methods, the response should have the type "XxxResponse", where "Xxx" + // is the original method name. For example, if the original method name + // is "TakeSnapshot()", the inferred response type is + // "TakeSnapshotResponse". + Response googleapi.RawMessage `json:"response,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Done") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Done") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleLongrunningOperation) MarshalJSON() ([]byte, error) { + type NoMethod GoogleLongrunningOperation + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleRpcStatus: The "Status" type defines a logical error model that +// is suitable for different programming environments, including REST +// APIs and RPC APIs. It is used by gRPC (https://github.com/grpc). 
Each +// "Status" message contains three pieces of data: error code, error +// message, and error details. You can find out more about this error +// model and how to work with it in the API Design Guide +// (https://cloud.google.com/apis/design/errors). +type GoogleRpcStatus struct { + // Code: The status code, which should be an enum value of + // google.rpc.Code. + Code int64 `json:"code,omitempty"` + + // Details: A list of messages that carry the error details. There is a + // common set of message types for APIs to use. + Details []googleapi.RawMessage `json:"details,omitempty"` + + // Message: A developer-facing error message, which should be in + // English. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleRpcStatus) MarshalJSON() ([]byte, error) { + type NoMethod GoogleRpcStatus + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // HmacKey: JSON template to produce a JSON-style HMAC Key resource for // Create responses. type HmacKey struct { @@ -1645,11 +2084,111 @@ func (s *HmacKeysMetadata) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Notification: A subscription to receive Google PubSub notifications. -type Notification struct { - // CustomAttributes: An optional list of additional attributes to attach - // to each Cloud PubSub message published for this notification - // subscription. +// ManagedFolder: A managed folder. +type ManagedFolder struct { + // Bucket: The name of the bucket containing this managed folder. + Bucket string `json:"bucket,omitempty"` + + // CreateTime: The creation time of the managed folder in RFC 3339 + // format. + CreateTime string `json:"createTime,omitempty"` + + // Id: The ID of the managed folder, including the bucket name and + // managed folder name. + Id string `json:"id,omitempty"` + + // Kind: The kind of item this is. For managed folders, this is always + // storage#managedFolder. + Kind string `json:"kind,omitempty"` + + // Metageneration: The version of the metadata for this managed folder. + // Used for preconditions and for detecting changes in metadata. + Metageneration int64 `json:"metageneration,omitempty,string"` + + // Name: The name of the managed folder. Required if not specified by + // URL parameter. + Name string `json:"name,omitempty"` + + // SelfLink: The link to this managed folder. + SelfLink string `json:"selfLink,omitempty"` + + // UpdateTime: The last update time of the managed folder metadata in + // RFC 3339 format. 
+ UpdateTime string `json:"updateTime,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Bucket") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Bucket") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ManagedFolder) MarshalJSON() ([]byte, error) { + type NoMethod ManagedFolder + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ManagedFolders: A list of managed folders. +type ManagedFolders struct { + // Items: The list of items. + Items []*ManagedFolder `json:"items,omitempty"` + + // Kind: The kind of item this is. For lists of managed folders, this is + // always storage#managedFolders. + Kind string `json:"kind,omitempty"` + + // NextPageToken: The continuation token, used to page through large + // result sets. Provide this value in a subsequent request to return the + // next page of results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Items") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Items") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ManagedFolders) MarshalJSON() ([]byte, error) { + type NoMethod ManagedFolders + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Notification: A subscription to receive Google PubSub notifications. +type Notification struct { + // CustomAttributes: An optional list of additional attributes to attach + // to each Cloud PubSub message published for this notification + // subscription. CustomAttributes map[string]string `json:"custom_attributes,omitempty"` // Etag: HTTP 1.1 Entity tag for this subscription notification. @@ -1812,6 +2351,12 @@ type Object struct { // versioning. 
Generation int64 `json:"generation,omitempty,string"` + // HardDeleteTime: This is the time (in the future) when the + // soft-deleted object will no longer be restorable. It is equal to the + // soft delete time plus the current soft delete retention duration of + // the bucket. + HardDeleteTime string `json:"hardDeleteTime,omitempty"` + // Id: The ID of the object, including the bucket name, object name, and // generation number. Id string `json:"id,omitempty"` @@ -1849,6 +2394,9 @@ type Object struct { // the object. Owner *ObjectOwner `json:"owner,omitempty"` + // Retention: A collection of object level retention parameters. + Retention *ObjectRetention `json:"retention,omitempty"` + // RetentionExpirationTime: A server-determined value that specifies the // earliest time that the object's retention period expires. This value // is in RFC 3339 format. Note 1: This field is not provided for objects @@ -1864,6 +2412,10 @@ type Object struct { // Size: Content-Length of the data in bytes. Size uint64 `json:"size,omitempty,string"` + // SoftDeleteTime: The time at which the object became soft-deleted in + // RFC 3339 format. + SoftDeleteTime string `json:"softDeleteTime,omitempty"` + // StorageClass: Storage class of the object. StorageClass string `json:"storageClass,omitempty"` @@ -1879,9 +2431,9 @@ type Object struct { // TimeCreated: The creation time of the object in RFC 3339 format. TimeCreated string `json:"timeCreated,omitempty"` - // TimeDeleted: The deletion time of the object in RFC 3339 format. Will - // be returned if and only if this version of the object has been - // deleted. + // TimeDeleted: The time at which the object became noncurrent in RFC + // 3339 format. Will be returned if and only if this version of the + // object has been deleted. TimeDeleted string `json:"timeDeleted,omitempty"` // TimeStorageClassUpdated: The time at which the object's storage class @@ -1990,6 +2542,39 @@ func (s *ObjectOwner) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// ObjectRetention: A collection of object level retention parameters. +type ObjectRetention struct { + // Mode: The bucket's object retention mode, can only be Unlocked or + // Locked. + Mode string `json:"mode,omitempty"` + + // RetainUntilTime: A time in RFC 3339 format until which object + // retention protects this object. + RetainUntilTime string `json:"retainUntilTime,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Mode") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Mode") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *ObjectRetention) MarshalJSON() ([]byte, error) { + type NoMethod ObjectRetention + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // ObjectAccessControl: An access-control entry. type ObjectAccessControl struct { // Bucket: The name of the bucket. @@ -2187,7 +2772,7 @@ func (s *Objects) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Policy: A bucket/object IAM policy. +// Policy: A bucket/object/managedFolder IAM policy. type Policy struct { // Bindings: An association between a role, which comes with a set of // permissions, and members who may assume that role. @@ -2201,8 +2786,9 @@ type Policy struct { Kind string `json:"kind,omitempty"` // ResourceId: The ID of the resource to which this policy belongs. Will - // be of the form projects/_/buckets/bucket for buckets, and - // projects/_/buckets/bucket/objects/object for objects. A specific + // be of the form projects/_/buckets/bucket for buckets, + // projects/_/buckets/bucket/objects/object for objects, and + // projects/_/buckets/bucket/managedFolders/managedFolder. A specific // generation may be specified by appending #generationNumber to the end // of the object name, e.g. // projects/_/buckets/my-bucket/objects/data.txt#17. The current @@ -2419,15 +3005,15 @@ func (s *ServiceAccount) MarshalJSON() ([]byte, error) { } // TestIamPermissionsResponse: A -// storage.(buckets|objects).testIamPermissions response. +// storage.(buckets|objects|managedFolders).testIamPermissions response. type TestIamPermissionsResponse struct { // Kind: The kind of item this is. Kind string `json:"kind,omitempty"` // Permissions: The permissions held by the caller. Permissions are // always of the format storage.resource.capability, where resource is - // one of buckets or objects. The supported permissions are as follows: - // + // one of buckets, objects, or managedFolders. The supported permissions + // are as follows: // - storage.buckets.delete — Delete bucket. // - storage.buckets.get — Read bucket metadata. // - storage.buckets.getIamPolicy — Read bucket IAM policy. @@ -2442,6 +3028,14 @@ type TestIamPermissionsResponse struct { // - storage.objects.list — List objects. // - storage.objects.setIamPolicy — Update object IAM policy. // - storage.objects.update — Update object metadata. + // - storage.managedFolders.delete — Delete managed folder. + // - storage.managedFolders.get — Read managed folder metadata. + // - storage.managedFolders.getIamPolicy — Read managed folder IAM + // policy. + // - storage.managedFolders.create — Create managed folder. + // - storage.managedFolders.list — List managed folders. + // - storage.managedFolders.setIamPolicy — Update managed folder IAM + // policy. 
Permissions []string `json:"permissions,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -2471,42 +3065,32 @@ func (s *TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// method id "storage.bucketAccessControls.delete": +// method id "storage.anywhereCaches.disable": -type BucketAccessControlsDeleteCall struct { - s *Service - bucket string - entity string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type AnywhereCacheDisableCall struct { + s *Service + bucket string + anywhereCacheId string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Permanently deletes the ACL entry for the specified entity on -// the specified bucket. +// Disable: Disables an Anywhere Cache instance. // -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. -func (r *BucketAccessControlsService) Delete(bucket string, entity string) *BucketAccessControlsDeleteCall { - c := &BucketAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - anywhereCacheId: The ID of requested Anywhere Cache instance. +// - bucket: Name of the partent bucket. +func (r *AnywhereCacheService) Disable(bucket string, anywhereCacheId string) *AnywhereCacheDisableCall { + c := &AnywhereCacheDisableCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket - c.entity = entity - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *BucketAccessControlsDeleteCall) UserProject(userProject string) *BucketAccessControlsDeleteCall { - c.urlParams_.Set("userProject", userProject) + c.anywhereCacheId = anywhereCacheId return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BucketAccessControlsDeleteCall) Fields(s ...googleapi.Field) *BucketAccessControlsDeleteCall { +func (c *AnywhereCacheDisableCall) Fields(s ...googleapi.Field) *AnywhereCacheDisableCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -2514,21 +3098,21 @@ func (c *BucketAccessControlsDeleteCall) Fields(s ...googleapi.Field) *BucketAcc // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BucketAccessControlsDeleteCall) Context(ctx context.Context) *BucketAccessControlsDeleteCall { +func (c *AnywhereCacheDisableCall) Context(ctx context.Context) *AnywhereCacheDisableCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
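To tie the TestIamPermissionsResponse documented above to a concrete request, a small sketch against the existing bucket-level call; the permission names come from the list above, and the new storage.managedFolders.* permissions would presumably be checked through the ManagedFolders service added earlier in this file (not shown in this excerpt).

// assumes the imports and *storage.Service construction from the earlier sketch
func checkCallerPermissions(ctx context.Context, svc *storage.Service) error {
	resp, err := svc.Buckets.TestIamPermissions("my-bucket", []string{
		"storage.buckets.get",
		"storage.objects.list",
	}).Context(ctx).Do()
	if err != nil {
		return err
	}
	// Permissions echoes back the subset of the requested permissions that the
	// caller actually holds.
	for _, p := range resp.Permissions {
		log.Println("granted:", p)
	}
	return nil
}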
-func (c *BucketAccessControlsDeleteCall) Header() http.Header { +func (c *AnywhereCacheDisableCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BucketAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *AnywhereCacheDisableCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -2538,105 +3122,119 @@ func (c *BucketAccessControlsDeleteCall) doRequest(alt string) (*http.Response, var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/anywhereCaches/{anywhereCacheId}/disable") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "entity": c.entity, + "bucket": c.bucket, + "anywhereCacheId": c.anywhereCacheId, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.bucketAccessControls.delete" call. -func (c *BucketAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error { +// Do executes the "storage.anywhereCaches.disable" call. +// Exactly one of *AnywhereCache or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *AnywhereCache.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *AnywhereCacheDisableCall) Do(opts ...googleapi.CallOption) (*AnywhereCache, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } if err != nil { - return err + return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return gensupport.WrapError(err) + return nil, gensupport.WrapError(err) } - return nil + ret := &AnywhereCache{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil // { - // "description": "Permanently deletes the ACL entry for the specified entity on the specified bucket.", - // "httpMethod": "DELETE", - // "id": "storage.bucketAccessControls.delete", + // "description": "Disables an Anywhere Cache instance.", + // "httpMethod": "POST", + // "id": "storage.anywhereCaches.disable", // "parameterOrder": [ // "bucket", - // "entity" + // "anywhereCacheId" // ], // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", + // "anywhereCacheId": { + // "description": "The ID of requested Anywhere Cache instance.", // "location": "path", // "required": true, // "type": "string" // }, - // "entity": { - // "description": "The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "bucket": { + // "description": "Name of the partent bucket", // "location": "path", // "required": true, // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" // } // }, - // "path": "b/{bucket}/acl/{entity}", + // "path": "b/{bucket}/anywhereCaches/{anywhereCacheId}/disable", + // "response": { + // "$ref": "AnywhereCache" + // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } -// method id "storage.bucketAccessControls.get": +// method id "storage.anywhereCaches.get": -type BucketAccessControlsGetCall struct { - s *Service - bucket string - entity string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type AnywhereCacheGetCall struct { + s *Service + bucket string + anywhereCacheId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the ACL entry for the specified entity on the specified -// bucket. +// Get: Returns the metadata of an Anywhere Cache instance. // -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. -func (r *BucketAccessControlsService) Get(bucket string, entity string) *BucketAccessControlsGetCall { - c := &BucketAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - anywhereCacheId: The ID of requested Anywhere Cache instance. +// - bucket: Name of the partent bucket. +func (r *AnywhereCacheService) Get(bucket string, anywhereCacheId string) *AnywhereCacheGetCall { + c := &AnywhereCacheGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket - c.entity = entity - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *BucketAccessControlsGetCall) UserProject(userProject string) *BucketAccessControlsGetCall { - c.urlParams_.Set("userProject", userProject) + c.anywhereCacheId = anywhereCacheId return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BucketAccessControlsGetCall) Fields(s ...googleapi.Field) *BucketAccessControlsGetCall { +func (c *AnywhereCacheGetCall) Fields(s ...googleapi.Field) *AnywhereCacheGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -2646,7 +3244,7 @@ func (c *BucketAccessControlsGetCall) Fields(s ...googleapi.Field) *BucketAccess // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
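A short sketch of the conditional-fetch pattern that the IfNoneMatch comment above describes, applied to the new Anywhere Cache Get call; the bucket, cache ID, and lastETag are placeholders, with lastETag assumed to come from a previous response's HTTP headers.

// googleapi is "google.golang.org/api/googleapi"; other imports match the earlier sketch.
func refreshCacheMetadata(ctx context.Context, svc *storage.Service, lastETag string) error {
	cache, err := svc.AnywhereCache.Get("my-bucket", "my-cache-id").
		IfNoneMatch(lastETag).
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		// 304: the locally cached copy of the metadata is still current.
		return nil
	}
	if err != nil {
		return err
	}
	log.Printf("cache %s is %s (TTL %s)", cache.AnywhereCacheId, cache.State, cache.Ttl)
	return nil
}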
-func (c *BucketAccessControlsGetCall) IfNoneMatch(entityTag string) *BucketAccessControlsGetCall { +func (c *AnywhereCacheGetCall) IfNoneMatch(entityTag string) *AnywhereCacheGetCall { c.ifNoneMatch_ = entityTag return c } @@ -2654,21 +3252,21 @@ func (c *BucketAccessControlsGetCall) IfNoneMatch(entityTag string) *BucketAcces // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BucketAccessControlsGetCall) Context(ctx context.Context) *BucketAccessControlsGetCall { +func (c *AnywhereCacheGetCall) Context(ctx context.Context) *AnywhereCacheGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BucketAccessControlsGetCall) Header() http.Header { +func (c *AnywhereCacheGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BucketAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *AnywhereCacheGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -2681,7 +3279,7 @@ func (c *BucketAccessControlsGetCall) doRequest(alt string) (*http.Response, err var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/anywhereCaches/{anywhereCacheId}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -2689,20 +3287,20 @@ func (c *BucketAccessControlsGetCall) doRequest(alt string) (*http.Response, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "entity": c.entity, + "bucket": c.bucket, + "anywhereCacheId": c.anywhereCacheId, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.bucketAccessControls.get" call. -// Exactly one of *BucketAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *BucketAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "storage.anywhereCaches.get" call. +// Exactly one of *AnywhereCache or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *AnywhereCache.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *BucketAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) { +func (c *AnywhereCacheGetCall) Do(opts ...googleapi.CallOption) (*AnywhereCache, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -2721,7 +3319,7 @@ func (c *BucketAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*BucketA if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &BucketAccessControl{ + ret := &AnywhereCache{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -2733,76 +3331,67 @@ func (c *BucketAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*BucketA } return ret, nil // { - // "description": "Returns the ACL entry for the specified entity on the specified bucket.", + // "description": "Returns the metadata of an Anywhere Cache instance.", // "httpMethod": "GET", - // "id": "storage.bucketAccessControls.get", + // "id": "storage.anywhereCaches.get", // "parameterOrder": [ // "bucket", - // "entity" + // "anywhereCacheId" // ], // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", + // "anywhereCacheId": { + // "description": "The ID of requested Anywhere Cache instance.", // "location": "path", // "required": true, // "type": "string" // }, - // "entity": { - // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "bucket": { + // "description": "Name of the partent bucket", // "location": "path", // "required": true, // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" // } // }, - // "path": "b/{bucket}/acl/{entity}", + // "path": "b/{bucket}/anywhereCaches/{anywhereCacheId}", // "response": { - // "$ref": "BucketAccessControl" + // "$ref": "AnywhereCache" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] // } } -// method id "storage.bucketAccessControls.insert": +// method id "storage.anywhereCaches.insert": -type BucketAccessControlsInsertCall struct { - s *Service - bucket string - bucketaccesscontrol *BucketAccessControl - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type AnywhereCacheInsertCall struct { + s *Service + bucket string + anywherecache *AnywhereCache + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a new ACL entry on the specified bucket. +// Insert: Creates an Anywhere Cache instance. // -// - bucket: Name of a bucket. -func (r *BucketAccessControlsService) Insert(bucket string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsInsertCall { - c := &BucketAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - bucket: Name of the partent bucket. +func (r *AnywhereCacheService) Insert(bucket string, anywherecache *AnywhereCache) *AnywhereCacheInsertCall { + c := &AnywhereCacheInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket - c.bucketaccesscontrol = bucketaccesscontrol - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. 
Required for Requester Pays buckets. -func (c *BucketAccessControlsInsertCall) UserProject(userProject string) *BucketAccessControlsInsertCall { - c.urlParams_.Set("userProject", userProject) + c.anywherecache = anywherecache return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BucketAccessControlsInsertCall) Fields(s ...googleapi.Field) *BucketAccessControlsInsertCall { +func (c *AnywhereCacheInsertCall) Fields(s ...googleapi.Field) *AnywhereCacheInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -2810,21 +3399,21 @@ func (c *BucketAccessControlsInsertCall) Fields(s ...googleapi.Field) *BucketAcc // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BucketAccessControlsInsertCall) Context(ctx context.Context) *BucketAccessControlsInsertCall { +func (c *AnywhereCacheInsertCall) Context(ctx context.Context) *AnywhereCacheInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BucketAccessControlsInsertCall) Header() http.Header { +func (c *AnywhereCacheInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BucketAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *AnywhereCacheInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -2832,14 +3421,14 @@ func (c *BucketAccessControlsInsertCall) doRequest(alt string) (*http.Response, } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.anywherecache) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/anywhereCaches") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -2852,14 +3441,14 @@ func (c *BucketAccessControlsInsertCall) doRequest(alt string) (*http.Response, return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.bucketAccessControls.insert" call. -// Exactly one of *BucketAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *BucketAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "storage.anywhereCaches.insert" call. +// Exactly one of *GoogleLongrunningOperation or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *GoogleLongrunningOperation.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. 
-func (c *BucketAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) { +func (c *AnywhereCacheInsertCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunningOperation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -2878,7 +3467,7 @@ func (c *BucketAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*Buck if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &BucketAccessControl{ + ret := &GoogleLongrunningOperation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -2890,43 +3479,39 @@ func (c *BucketAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*Buck } return ret, nil // { - // "description": "Creates a new ACL entry on the specified bucket.", + // "description": "Creates an Anywhere Cache instance.", // "httpMethod": "POST", - // "id": "storage.bucketAccessControls.insert", + // "id": "storage.anywhereCaches.insert", // "parameterOrder": [ // "bucket" // ], // "parameters": { // "bucket": { - // "description": "Name of a bucket.", + // "description": "Name of the partent bucket", // "location": "path", // "required": true, // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" // } // }, - // "path": "b/{bucket}/acl", + // "path": "b/{bucket}/anywhereCaches", // "request": { - // "$ref": "BucketAccessControl" + // "$ref": "AnywhereCache" // }, // "response": { - // "$ref": "BucketAccessControl" + // "$ref": "GoogleLongrunningOperation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } -// method id "storage.bucketAccessControls.list": +// method id "storage.anywhereCaches.list": -type BucketAccessControlsListCall struct { +type AnywhereCacheListCall struct { s *Service bucket string urlParams_ gensupport.URLParams @@ -2935,26 +3520,35 @@ type BucketAccessControlsListCall struct { header_ http.Header } -// List: Retrieves ACL entries on the specified bucket. +// List: Returns a list of Anywhere Cache instances of the bucket +// matching the criteria. // -// - bucket: Name of a bucket. -func (r *BucketAccessControlsService) List(bucket string) *BucketAccessControlsListCall { - c := &BucketAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - bucket: Name of the partent bucket. +func (r *AnywhereCacheService) List(bucket string) *AnywhereCacheListCall { + c := &AnywhereCacheListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *BucketAccessControlsListCall) UserProject(userProject string) *BucketAccessControlsListCall { - c.urlParams_.Set("userProject", userProject) +// PageSize sets the optional parameter "pageSize": Maximum number of +// items return in a single page of responses. Maximum 1000. 
+func (c *AnywhereCacheListCall) PageSize(pageSize int64) *AnywhereCacheListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": A +// previously-returned page token representing part of the larger set of +// results to view. +func (c *AnywhereCacheListCall) PageToken(pageToken string) *AnywhereCacheListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BucketAccessControlsListCall) Fields(s ...googleapi.Field) *BucketAccessControlsListCall { +func (c *AnywhereCacheListCall) Fields(s ...googleapi.Field) *AnywhereCacheListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -2964,7 +3558,7 @@ func (c *BucketAccessControlsListCall) Fields(s ...googleapi.Field) *BucketAcces // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *BucketAccessControlsListCall) IfNoneMatch(entityTag string) *BucketAccessControlsListCall { +func (c *AnywhereCacheListCall) IfNoneMatch(entityTag string) *AnywhereCacheListCall { c.ifNoneMatch_ = entityTag return c } @@ -2972,21 +3566,21 @@ func (c *BucketAccessControlsListCall) IfNoneMatch(entityTag string) *BucketAcce // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BucketAccessControlsListCall) Context(ctx context.Context) *BucketAccessControlsListCall { +func (c *AnywhereCacheListCall) Context(ctx context.Context) *AnywhereCacheListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BucketAccessControlsListCall) Header() http.Header { +func (c *AnywhereCacheListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BucketAccessControlsListCall) doRequest(alt string) (*http.Response, error) { +func (c *AnywhereCacheListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -2999,7 +3593,7 @@ func (c *BucketAccessControlsListCall) doRequest(alt string) (*http.Response, er var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/anywhereCache") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -3012,14 +3606,14 @@ func (c *BucketAccessControlsListCall) doRequest(alt string) (*http.Response, er return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.bucketAccessControls.list" call. -// Exactly one of *BucketAccessControls or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *BucketAccessControls.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "storage.anywhereCaches.list" call. +// Exactly one of *AnywhereCaches or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *AnywhereCaches.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *BucketAccessControlsListCall) Do(opts ...googleapi.CallOption) (*BucketAccessControls, error) { +func (c *AnywhereCacheListCall) Do(opts ...googleapi.CallOption) (*AnywhereCaches, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -3038,7 +3632,7 @@ func (c *BucketAccessControlsListCall) Do(opts ...googleapi.CallOption) (*Bucket if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &BucketAccessControls{ + ret := &AnywhereCaches{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -3050,74 +3644,94 @@ func (c *BucketAccessControlsListCall) Do(opts ...googleapi.CallOption) (*Bucket } return ret, nil // { - // "description": "Retrieves ACL entries on the specified bucket.", + // "description": "Returns a list of Anywhere Cache instances of the bucket matching the criteria.", // "httpMethod": "GET", - // "id": "storage.bucketAccessControls.list", + // "id": "storage.anywhereCaches.list", // "parameterOrder": [ // "bucket" // ], // "parameters": { // "bucket": { - // "description": "Name of a bucket.", + // "description": "Name of the partent bucket", // "location": "path", // "required": true, // "type": "string" // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "pageSize": { + // "description": "Maximum number of items return in a single page of responses. Maximum 1000.", + // "format": "int32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "A previously-returned page token representing part of the larger set of results to view.", // "location": "query", // "type": "string" // } // }, - // "path": "b/{bucket}/acl", + // "path": "b/{bucket}/anywhereCache", // "response": { - // "$ref": "BucketAccessControls" + // "$ref": "AnywhereCaches" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } -// method id "storage.bucketAccessControls.patch": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
+func (c *AnywhereCacheListCall) Pages(ctx context.Context, f func(*AnywhereCaches) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type BucketAccessControlsPatchCall struct { - s *Service - bucket string - entity string - bucketaccesscontrol *BucketAccessControl - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "storage.anywhereCaches.pause": + +type AnywhereCachePauseCall struct { + s *Service + bucket string + anywhereCacheId string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Patches an ACL entry on the specified bucket. +// Pause: Pauses an Anywhere Cache instance. // -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. -func (r *BucketAccessControlsService) Patch(bucket string, entity string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsPatchCall { - c := &BucketAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - anywhereCacheId: The ID of requested Anywhere Cache instance. +// - bucket: Name of the partent bucket. +func (r *AnywhereCacheService) Pause(bucket string, anywhereCacheId string) *AnywhereCachePauseCall { + c := &AnywhereCachePauseCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket - c.entity = entity - c.bucketaccesscontrol = bucketaccesscontrol - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *BucketAccessControlsPatchCall) UserProject(userProject string) *BucketAccessControlsPatchCall { - c.urlParams_.Set("userProject", userProject) + c.anywhereCacheId = anywhereCacheId return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BucketAccessControlsPatchCall) Fields(s ...googleapi.Field) *BucketAccessControlsPatchCall { +func (c *AnywhereCachePauseCall) Fields(s ...googleapi.Field) *AnywhereCachePauseCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -3125,21 +3739,21 @@ func (c *BucketAccessControlsPatchCall) Fields(s ...googleapi.Field) *BucketAcce // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BucketAccessControlsPatchCall) Context(ctx context.Context) *BucketAccessControlsPatchCall { +func (c *AnywhereCachePauseCall) Context(ctx context.Context) *AnywhereCachePauseCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *BucketAccessControlsPatchCall) Header() http.Header { +func (c *AnywhereCachePauseCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BucketAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *AnywhereCachePauseCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -3147,35 +3761,30 @@ func (c *BucketAccessControlsPatchCall) doRequest(alt string) (*http.Response, e } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/anywhereCaches/{anywhereCacheId}/pause") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "entity": c.entity, + "bucket": c.bucket, + "anywhereCacheId": c.anywhereCacheId, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.bucketAccessControls.patch" call. -// Exactly one of *BucketAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *BucketAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "storage.anywhereCaches.pause" call. +// Exactly one of *AnywhereCache or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *AnywhereCache.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *BucketAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) { +func (c *AnywhereCachePauseCall) Do(opts ...googleapi.CallOption) (*AnywhereCache, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -3194,7 +3803,7 @@ func (c *BucketAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*Bucke if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &BucketAccessControl{ + ret := &AnywhereCache{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -3206,84 +3815,66 @@ func (c *BucketAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*Bucke } return ret, nil // { - // "description": "Patches an ACL entry on the specified bucket.", - // "httpMethod": "PATCH", - // "id": "storage.bucketAccessControls.patch", + // "description": "Pauses an Anywhere Cache instance.", + // "httpMethod": "POST", + // "id": "storage.anywhereCaches.pause", // "parameterOrder": [ // "bucket", - // "entity" + // "anywhereCacheId" // ], // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", + // "anywhereCacheId": { + // "description": "The ID of requested Anywhere Cache instance.", // "location": "path", // "required": true, // "type": "string" // }, - // "entity": { - // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "bucket": { + // "description": "Name of the partent bucket", // "location": "path", // "required": true, // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" // } // }, - // "path": "b/{bucket}/acl/{entity}", - // "request": { - // "$ref": "BucketAccessControl" - // }, + // "path": "b/{bucket}/anywhereCaches/{anywhereCacheId}/pause", // "response": { - // "$ref": "BucketAccessControl" + // "$ref": "AnywhereCache" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } -// method id "storage.bucketAccessControls.update": +// method id "storage.anywhereCaches.resume": -type BucketAccessControlsUpdateCall struct { - s *Service - bucket string - entity string - bucketaccesscontrol *BucketAccessControl - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type AnywhereCacheResumeCall struct { + s *Service + bucket string + anywhereCacheId string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Update: Updates an ACL entry on the specified bucket. +// Resume: Resumes a paused or disabled Anywhere Cache instance. // -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. -func (r *BucketAccessControlsService) Update(bucket string, entity string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsUpdateCall { - c := &BucketAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - anywhereCacheId: The ID of requested Anywhere Cache instance. +// - bucket: Name of the partent bucket. 
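The pause and resume calls defined here and just below both return the updated cache directly; a round-trip sketch (again assuming the svc.AnywhereCaches accessor, which is outside this hunk):

package example

import (
	"context"

	storage "google.golang.org/api/storage/v1"
)

// pauseThenResume exercises the two lifecycle calls; both Do methods return
// (*storage.AnywhereCache, error) per the doc comments in this patch.
func pauseThenResume(ctx context.Context, svc *storage.Service, bucket, cacheID string) error {
	if _, err := svc.AnywhereCaches.Pause(bucket, cacheID).Context(ctx).Do(); err != nil {
		return err
	}
	_, err := svc.AnywhereCaches.Resume(bucket, cacheID).Context(ctx).Do()
	return err
}
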
+func (r *AnywhereCacheService) Resume(bucket string, anywhereCacheId string) *AnywhereCacheResumeCall { + c := &AnywhereCacheResumeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket - c.entity = entity - c.bucketaccesscontrol = bucketaccesscontrol - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *BucketAccessControlsUpdateCall) UserProject(userProject string) *BucketAccessControlsUpdateCall { - c.urlParams_.Set("userProject", userProject) + c.anywhereCacheId = anywhereCacheId return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BucketAccessControlsUpdateCall) Fields(s ...googleapi.Field) *BucketAccessControlsUpdateCall { +func (c *AnywhereCacheResumeCall) Fields(s ...googleapi.Field) *AnywhereCacheResumeCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -3291,21 +3882,21 @@ func (c *BucketAccessControlsUpdateCall) Fields(s ...googleapi.Field) *BucketAcc // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BucketAccessControlsUpdateCall) Context(ctx context.Context) *BucketAccessControlsUpdateCall { +func (c *AnywhereCacheResumeCall) Context(ctx context.Context) *AnywhereCacheResumeCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BucketAccessControlsUpdateCall) Header() http.Header { +func (c *AnywhereCacheResumeCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BucketAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *AnywhereCacheResumeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -3313,35 +3904,30 @@ func (c *BucketAccessControlsUpdateCall) doRequest(alt string) (*http.Response, } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/anywhereCaches/{anywhereCacheId}/resume") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "entity": c.entity, + "bucket": c.bucket, + "anywhereCacheId": c.anywhereCacheId, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.bucketAccessControls.update" call. -// Exactly one of *BucketAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *BucketAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "storage.anywhereCaches.resume" call. 
+// Exactly one of *AnywhereCache or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *AnywhereCache.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *BucketAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) { +func (c *AnywhereCacheResumeCall) Do(opts ...googleapi.CallOption) (*AnywhereCache, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -3360,7 +3946,7 @@ func (c *BucketAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*Buck if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &BucketAccessControl{ + ret := &AnywhereCache{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -3372,93 +3958,69 @@ func (c *BucketAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*Buck } return ret, nil // { - // "description": "Updates an ACL entry on the specified bucket.", - // "httpMethod": "PUT", - // "id": "storage.bucketAccessControls.update", + // "description": "Resumes a paused or disabled Anywhere Cache instance.", + // "httpMethod": "POST", + // "id": "storage.anywhereCaches.resume", // "parameterOrder": [ // "bucket", - // "entity" + // "anywhereCacheId" // ], // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", + // "anywhereCacheId": { + // "description": "The ID of requested Anywhere Cache instance.", // "location": "path", // "required": true, // "type": "string" // }, - // "entity": { - // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "bucket": { + // "description": "Name of the partent bucket", // "location": "path", // "required": true, // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" // } // }, - // "path": "b/{bucket}/acl/{entity}", - // "request": { - // "$ref": "BucketAccessControl" - // }, + // "path": "b/{bucket}/anywhereCaches/{anywhereCacheId}/resume", // "response": { - // "$ref": "BucketAccessControl" + // "$ref": "AnywhereCache" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } -// method id "storage.buckets.delete": +// method id "storage.anywhereCaches.update": -type BucketsDeleteCall struct { - s *Service - bucket string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type AnywhereCacheUpdateCall struct { + s *Service + bucket string + anywhereCacheId string + anywherecache *AnywhereCache + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Permanently deletes an empty bucket. +// Update: Updates the config(ttl and admissionPolicy) of an Anywhere +// Cache instance. // -// - bucket: Name of a bucket. 
-func (r *BucketsService) Delete(bucket string) *BucketsDeleteCall { - c := &BucketsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - anywhereCacheId: The ID of requested Anywhere Cache instance. +// - bucket: Name of the partent bucket. +func (r *AnywhereCacheService) Update(bucket string, anywhereCacheId string, anywherecache *AnywhereCache) *AnywhereCacheUpdateCall { + c := &AnywhereCacheUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket - return c -} - -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": If set, only deletes the bucket if its -// metageneration matches this value. -func (c *BucketsDeleteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsDeleteCall { - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) - return c -} - -// IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": If set, only deletes the bucket if its -// metageneration does not match this value. -func (c *BucketsDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsDeleteCall { - c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *BucketsDeleteCall) UserProject(userProject string) *BucketsDeleteCall { - c.urlParams_.Set("userProject", userProject) + c.anywhereCacheId = anywhereCacheId + c.anywherecache = anywherecache return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BucketsDeleteCall) Fields(s ...googleapi.Field) *BucketsDeleteCall { +func (c *AnywhereCacheUpdateCall) Fields(s ...googleapi.Field) *AnywhereCacheUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -3466,21 +4028,21 @@ func (c *BucketsDeleteCall) Fields(s ...googleapi.Field) *BucketsDeleteCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BucketsDeleteCall) Context(ctx context.Context) *BucketsDeleteCall { +func (c *AnywhereCacheUpdateCall) Context(ctx context.Context) *AnywhereCacheUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *BucketsDeleteCall) Header() http.Header { +func (c *AnywhereCacheUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BucketsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *AnywhereCacheUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -3488,66 +4050,93 @@ func (c *BucketsDeleteCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.anywherecache) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/anywhereCaches/{anywhereCacheId}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, + "bucket": c.bucket, + "anywhereCacheId": c.anywhereCacheId, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.buckets.delete" call. -func (c *BucketsDeleteCall) Do(opts ...googleapi.CallOption) error { +// Do executes the "storage.anywhereCaches.update" call. +// Exactly one of *GoogleLongrunningOperation or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *GoogleLongrunningOperation.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *AnywhereCacheUpdateCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunningOperation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } if err != nil { - return err + return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return gensupport.WrapError(err) + return nil, gensupport.WrapError(err) } - return nil + ret := &GoogleLongrunningOperation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil // { - // "description": "Permanently deletes an empty bucket.", - // "httpMethod": "DELETE", - // "id": "storage.buckets.delete", + // "description": "Updates the config(ttl and admissionPolicy) of an Anywhere Cache instance.", + // "httpMethod": "PATCH", + // "id": "storage.anywhereCaches.update", // "parameterOrder": [ - // "bucket" + // "bucket", + // "anywhereCacheId" // ], // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", + // "anywhereCacheId": { + // "description": "The ID of requested Anywhere Cache instance.", // "location": "path", // "required": true, // "type": "string" // }, - // "ifMetagenerationMatch": { - // "description": "If set, only deletes the bucket if its metageneration matches this value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "If set, only deletes the bucket if its metageneration does not match this value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", + // "bucket": { + // "description": "Name of the partent bucket", + // "location": "path", + // "required": true, // "type": "string" // } // }, - // "path": "b/{bucket}", + // "path": "b/{bucket}/anywhereCaches/{anywhereCacheId}", + // "request": { + // "$ref": "AnywhereCache" + // }, + // "response": { + // "$ref": "GoogleLongrunningOperation" + // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control", @@ -3557,59 +4146,34 @@ func (c *BucketsDeleteCall) Do(opts ...googleapi.CallOption) error { } -// method id "storage.buckets.get": +// method id "storage.bucketAccessControls.delete": -type BucketsGetCall struct { - s *Service - bucket string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type BucketAccessControlsDeleteCall struct { + s *Service + bucket string + entity string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns metadata for the specified bucket. +// Delete: Permanently deletes the ACL entry for the specified entity on +// the specified bucket. // -// - bucket: Name of a bucket. -func (r *BucketsService) Get(bucket string) *BucketsGetCall { - c := &BucketsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. 
+func (r *BucketAccessControlsService) Delete(bucket string, entity string) *BucketAccessControlsDeleteCall { + c := &BucketAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket - return c -} - -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the return of the bucket metadata -// conditional on whether the bucket's current metageneration matches -// the given value. -func (c *BucketsGetCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsGetCall { - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) - return c -} - -// IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": Makes the return of the bucket metadata -// conditional on whether the bucket's current metageneration does not -// match the given value. -func (c *BucketsGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsGetCall { - c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) - return c -} - -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to noAcl. -// -// Possible values: -// -// "full" - Include all properties. -// "noAcl" - Omit owner, acl and defaultObjectAcl properties. -func (c *BucketsGetCall) Projection(projection string) *BucketsGetCall { - c.urlParams_.Set("projection", projection) + c.entity = entity return c } // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. -func (c *BucketsGetCall) UserProject(userProject string) *BucketsGetCall { +func (c *BucketAccessControlsDeleteCall) UserProject(userProject string) *BucketAccessControlsDeleteCall { c.urlParams_.Set("userProject", userProject) return c } @@ -3617,107 +4181,71 @@ func (c *BucketsGetCall) UserProject(userProject string) *BucketsGetCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BucketsGetCall) Fields(s ...googleapi.Field) *BucketsGetCall { +func (c *BucketAccessControlsDeleteCall) Fields(s ...googleapi.Field) *BucketAccessControlsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *BucketsGetCall) IfNoneMatch(entityTag string) *BucketsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BucketsGetCall) Context(ctx context.Context) *BucketsGetCall { +func (c *BucketAccessControlsDeleteCall) Context(ctx context.Context) *BucketAccessControlsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *BucketsGetCall) Header() http.Header { +func (c *BucketAccessControlsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BucketsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *BucketAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, + "entity": c.entity, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.buckets.get" call. -// Exactly one of *Bucket or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Bucket.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *BucketsGetCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { +// Do executes the "storage.bucketAccessControls.delete" call. +func (c *BucketAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } if err != nil { - return nil, err + return err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &Bucket{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err + return gensupport.WrapError(err) } - return ret, nil + return nil // { - // "description": "Returns metadata for the specified bucket.", - // "httpMethod": "GET", - // "id": "storage.buckets.get", - // "parameterOrder": [ - // "bucket" + // "description": "Permanently deletes the ACL entry for the specified entity on the specified bucket.", + // "httpMethod": "DELETE", + // "id": "storage.bucketAccessControls.delete", + // "parameterOrder": [ + // "bucket", + // "entity" // ], // "parameters": { // "bucket": { @@ -3726,29 +4254,10 @@ func (c *BucketsGetCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { // "required": true, // "type": "string" // }, - // "ifMetagenerationMatch": { - // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. Defaults to noAcl.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit owner, acl and defaultObjectAcl properties." - // ], - // "location": "query", + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, // "type": "string" // }, // "userProject": { @@ -3757,54 +4266,44 @@ func (c *BucketsGetCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { // "type": "string" // } // }, - // "path": "b/{bucket}", - // "response": { - // "$ref": "Bucket" - // }, + // "path": "b/{bucket}/acl/{entity}", // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" + // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } } -// method id "storage.buckets.getIamPolicy": +// method id "storage.bucketAccessControls.get": -type BucketsGetIamPolicyCall struct { +type BucketAccessControlsGetCall struct { s *Service bucket string + entity string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// GetIamPolicy: Returns an IAM policy for the specified bucket. +// Get: Returns the ACL entry for the specified entity on the specified +// bucket. 
// -// - bucket: Name of a bucket. -func (r *BucketsService) GetIamPolicy(bucket string) *BucketsGetIamPolicyCall { - c := &BucketsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. +func (r *BucketAccessControlsService) Get(bucket string, entity string) *BucketAccessControlsGetCall { + c := &BucketAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket - return c -} - -// OptionsRequestedPolicyVersion sets the optional parameter -// "optionsRequestedPolicyVersion": The IAM policy format version to be -// returned. If the optionsRequestedPolicyVersion is for an older -// version that doesn't support part of the requested IAM policy, the -// request fails. -func (c *BucketsGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *BucketsGetIamPolicyCall { - c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) + c.entity = entity return c } // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. -func (c *BucketsGetIamPolicyCall) UserProject(userProject string) *BucketsGetIamPolicyCall { +func (c *BucketAccessControlsGetCall) UserProject(userProject string) *BucketAccessControlsGetCall { c.urlParams_.Set("userProject", userProject) return c } @@ -3812,7 +4311,7 @@ func (c *BucketsGetIamPolicyCall) UserProject(userProject string) *BucketsGetIam // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BucketsGetIamPolicyCall) Fields(s ...googleapi.Field) *BucketsGetIamPolicyCall { +func (c *BucketAccessControlsGetCall) Fields(s ...googleapi.Field) *BucketAccessControlsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -3822,7 +4321,7 @@ func (c *BucketsGetIamPolicyCall) Fields(s ...googleapi.Field) *BucketsGetIamPol // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *BucketsGetIamPolicyCall) IfNoneMatch(entityTag string) *BucketsGetIamPolicyCall { +func (c *BucketAccessControlsGetCall) IfNoneMatch(entityTag string) *BucketAccessControlsGetCall { c.ifNoneMatch_ = entityTag return c } @@ -3830,21 +4329,21 @@ func (c *BucketsGetIamPolicyCall) IfNoneMatch(entityTag string) *BucketsGetIamPo // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BucketsGetIamPolicyCall) Context(ctx context.Context) *BucketsGetIamPolicyCall { +func (c *BucketAccessControlsGetCall) Context(ctx context.Context) *BucketAccessControlsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *BucketsGetIamPolicyCall) Header() http.Header { +func (c *BucketAccessControlsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BucketsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *BucketAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -3857,7 +4356,7 @@ func (c *BucketsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/iam") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -3866,18 +4365,19 @@ func (c *BucketsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, + "entity": c.entity, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.buckets.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *BucketsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "storage.bucketAccessControls.get" call. +// Exactly one of *BucketAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *BucketAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BucketAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -3896,7 +4396,7 @@ func (c *BucketsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &Policy{ + ret := &BucketAccessControl{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -3908,11 +4408,12 @@ func (c *BucketsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err } return ret, nil // { - // "description": "Returns an IAM policy for the specified bucket.", + // "description": "Returns the ACL entry for the specified entity on the specified bucket.", // "httpMethod": "GET", - // "id": "storage.buckets.getIamPolicy", + // "id": "storage.bucketAccessControls.get", // "parameterOrder": [ - // "bucket" + // "bucket", + // "entity" // ], // "parameters": { // "bucket": { @@ -3921,12 +4422,11 @@ func (c *BucketsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err // "required": true, // "type": "string" // }, - // "optionsRequestedPolicyVersion": { - // "description": "The IAM policy format version to be returned. 
If the optionsRequestedPolicyVersion is for an older version that doesn't support part of the requested IAM policy, the request fails.", - // "format": "int32", - // "location": "query", - // "minimum": "1", - // "type": "integer" + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", @@ -3934,9 +4434,9 @@ func (c *BucketsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err // "type": "string" // } // }, - // "path": "b/{bucket}/iam", + // "path": "b/{bucket}/acl/{entity}", // "response": { - // "$ref": "Policy" + // "$ref": "BucketAccessControl" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -3946,100 +4446,30 @@ func (c *BucketsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err } -// method id "storage.buckets.insert": +// method id "storage.bucketAccessControls.insert": -type BucketsInsertCall struct { - s *Service - bucket *Bucket - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type BucketAccessControlsInsertCall struct { + s *Service + bucket string + bucketaccesscontrol *BucketAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a new bucket. +// Insert: Creates a new ACL entry on the specified bucket. // -// - project: A valid API project identifier. -func (r *BucketsService) Insert(projectid string, bucket *Bucket) *BucketsInsertCall { - c := &BucketsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.urlParams_.Set("project", projectid) +// - bucket: Name of a bucket. +func (r *BucketAccessControlsService) Insert(bucket string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsInsertCall { + c := &BucketAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket - return c -} - -// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a -// predefined set of access controls to this bucket. -// -// Possible values: -// -// "authenticatedRead" - Project team owners get OWNER access, and -// -// allAuthenticatedUsers get READER access. -// -// "private" - Project team owners get OWNER access. -// "projectPrivate" - Project team members get access according to -// -// their roles. -// -// "publicRead" - Project team owners get OWNER access, and allUsers -// -// get READER access. -// -// "publicReadWrite" - Project team owners get OWNER access, and -// -// allUsers get WRITER access. -func (c *BucketsInsertCall) PredefinedAcl(predefinedAcl string) *BucketsInsertCall { - c.urlParams_.Set("predefinedAcl", predefinedAcl) - return c -} - -// PredefinedDefaultObjectAcl sets the optional parameter -// "predefinedDefaultObjectAcl": Apply a predefined set of default -// object access controls to this bucket. -// -// Possible values: -// -// "authenticatedRead" - Object owner gets OWNER access, and -// -// allAuthenticatedUsers get READER access. -// -// "bucketOwnerFullControl" - Object owner gets OWNER access, and -// -// project team owners get OWNER access. -// -// "bucketOwnerRead" - Object owner gets OWNER access, and project -// -// team owners get READER access. -// -// "private" - Object owner gets OWNER access. 
-// "projectPrivate" - Object owner gets OWNER access, and project team -// -// members get access according to their roles. -// -// "publicRead" - Object owner gets OWNER access, and allUsers get -// -// READER access. -func (c *BucketsInsertCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsInsertCall { - c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl) - return c -} - -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to noAcl, unless the bucket resource -// specifies acl or defaultObjectAcl properties, when it defaults to -// full. -// -// Possible values: -// -// "full" - Include all properties. -// "noAcl" - Omit owner, acl and defaultObjectAcl properties. -func (c *BucketsInsertCall) Projection(projection string) *BucketsInsertCall { - c.urlParams_.Set("projection", projection) + c.bucketaccesscontrol = bucketaccesscontrol return c } // UserProject sets the optional parameter "userProject": The project to -// be billed for this request. -func (c *BucketsInsertCall) UserProject(userProject string) *BucketsInsertCall { +// be billed for this request. Required for Requester Pays buckets. +func (c *BucketAccessControlsInsertCall) UserProject(userProject string) *BucketAccessControlsInsertCall { c.urlParams_.Set("userProject", userProject) return c } @@ -4047,7 +4477,7 @@ func (c *BucketsInsertCall) UserProject(userProject string) *BucketsInsertCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BucketsInsertCall) Fields(s ...googleapi.Field) *BucketsInsertCall { +func (c *BucketAccessControlsInsertCall) Fields(s ...googleapi.Field) *BucketAccessControlsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -4055,21 +4485,21 @@ func (c *BucketsInsertCall) Fields(s ...googleapi.Field) *BucketsInsertCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BucketsInsertCall) Context(ctx context.Context) *BucketsInsertCall { +func (c *BucketAccessControlsInsertCall) Context(ctx context.Context) *BucketAccessControlsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *BucketsInsertCall) Header() http.Header { +func (c *BucketAccessControlsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BucketsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *BucketAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -4077,37 +4507,40 @@ func (c *BucketsInsertCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.buckets.insert" call. -// Exactly one of *Bucket or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Bucket.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *BucketsInsertCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } +// Do executes the "storage.bucketAccessControls.insert" call. +// Exactly one of *BucketAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *BucketAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BucketAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, @@ -4120,7 +4553,7 @@ func (c *BucketsInsertCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &Bucket{ + ret := &BucketAccessControl{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -4132,151 +4565,63 @@ func (c *BucketsInsertCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { } return ret, nil // { - // "description": "Creates a new bucket.", + // "description": "Creates a new ACL entry on the specified bucket.", // "httpMethod": "POST", - // "id": "storage.buckets.insert", + // "id": "storage.bucketAccessControls.insert", // "parameterOrder": [ - // "project" + // "bucket" // ], // "parameters": { - // "predefinedAcl": { - // "description": "Apply a predefined set of access controls to this bucket.", - // "enum": [ - // "authenticatedRead", - // "private", - // "projectPrivate", - // "publicRead", - // "publicReadWrite" - // ], - // "enumDescriptions": [ - // "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", - // "Project team owners get OWNER access.", - // "Project team members get access according to their roles.", - // "Project team owners get OWNER access, and allUsers get READER access.", - // "Project team owners get OWNER access, and allUsers get WRITER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "predefinedDefaultObjectAcl": { - // "description": "Apply a predefined set of default object access controls to this bucket.", - // "enum": [ - // "authenticatedRead", - // "bucketOwnerFullControl", - // "bucketOwnerRead", - // "private", - // "projectPrivate", - // "publicRead" - // ], - // "enumDescriptions": [ - // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - // "Object owner gets OWNER access, and project team owners get OWNER access.", - // "Object owner gets OWNER access, and project team owners get READER access.", - // "Object owner gets OWNER access.", - // "Object owner gets OWNER access, and project team members get access according to their roles.", - // "Object owner gets OWNER access, and allUsers get READER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "A valid API project identifier.", - // "location": "query", + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", // "required": true, // "type": "string" // }, - // "projection": { - // "description": "Set of properties to return. Defaults to noAcl, unless the bucket resource specifies acl or defaultObjectAcl properties, when it defaults to full.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit owner, acl and defaultObjectAcl properties." - // ], - // "location": "query", - // "type": "string" - // }, // "userProject": { - // "description": "The project to be billed for this request.", + // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", // "location": "query", // "type": "string" // } // }, - // "path": "b", + // "path": "b/{bucket}/acl", // "request": { - // "$ref": "Bucket" + // "$ref": "BucketAccessControl" // }, // "response": { - // "$ref": "Bucket" + // "$ref": "BucketAccessControl" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" + // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } } -// method id "storage.buckets.list": +// method id "storage.bucketAccessControls.list": -type BucketsListCall struct { +type BucketAccessControlsListCall struct { s *Service + bucket string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves a list of buckets for a given project. -// -// - project: A valid API project identifier. -func (r *BucketsService) List(projectid string) *BucketsListCall { - c := &BucketsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.urlParams_.Set("project", projectid) - return c -} - -// MaxResults sets the optional parameter "maxResults": Maximum number -// of buckets to return in a single response. The service will use this -// parameter or 1,000 items, whichever is smaller. -func (c *BucketsListCall) MaxResults(maxResults int64) *BucketsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// PageToken sets the optional parameter "pageToken": A -// previously-returned page token representing part of the larger set of -// results to view. -func (c *BucketsListCall) PageToken(pageToken string) *BucketsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Prefix sets the optional parameter "prefix": Filter results to -// buckets whose names begin with this prefix. -func (c *BucketsListCall) Prefix(prefix string) *BucketsListCall { - c.urlParams_.Set("prefix", prefix) - return c -} - -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to noAcl. -// -// Possible values: +// List: Retrieves ACL entries on the specified bucket. // -// "full" - Include all properties. -// "noAcl" - Omit owner, acl and defaultObjectAcl properties. -func (c *BucketsListCall) Projection(projection string) *BucketsListCall { - c.urlParams_.Set("projection", projection) +// - bucket: Name of a bucket. +func (r *BucketAccessControlsService) List(bucket string) *BucketAccessControlsListCall { + c := &BucketAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket return c } // UserProject sets the optional parameter "userProject": The project to -// be billed for this request. -func (c *BucketsListCall) UserProject(userProject string) *BucketsListCall { +// be billed for this request. Required for Requester Pays buckets. +func (c *BucketAccessControlsListCall) UserProject(userProject string) *BucketAccessControlsListCall { c.urlParams_.Set("userProject", userProject) return c } @@ -4284,7 +4629,7 @@ func (c *BucketsListCall) UserProject(userProject string) *BucketsListCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *BucketsListCall) Fields(s ...googleapi.Field) *BucketsListCall { +func (c *BucketAccessControlsListCall) Fields(s ...googleapi.Field) *BucketAccessControlsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -4294,7 +4639,7 @@ func (c *BucketsListCall) Fields(s ...googleapi.Field) *BucketsListCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *BucketsListCall) IfNoneMatch(entityTag string) *BucketsListCall { +func (c *BucketAccessControlsListCall) IfNoneMatch(entityTag string) *BucketAccessControlsListCall { c.ifNoneMatch_ = entityTag return c } @@ -4302,21 +4647,21 @@ func (c *BucketsListCall) IfNoneMatch(entityTag string) *BucketsListCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BucketsListCall) Context(ctx context.Context) *BucketsListCall { +func (c *BucketAccessControlsListCall) Context(ctx context.Context) *BucketAccessControlsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BucketsListCall) Header() http.Header { +func (c *BucketAccessControlsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BucketsListCall) doRequest(alt string) (*http.Response, error) { +func (c *BucketAccessControlsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -4329,24 +4674,27 @@ func (c *BucketsListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.buckets.list" call. -// Exactly one of *Buckets or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Buckets.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *BucketsListCall) Do(opts ...googleapi.CallOption) (*Buckets, error) { +// Do executes the "storage.bucketAccessControls.list" call. +// Exactly one of *BucketAccessControls or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *BucketAccessControls.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BucketAccessControlsListCall) Do(opts ...googleapi.CallOption) (*BucketAccessControls, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -4365,7 +4713,7 @@ func (c *BucketsListCall) Do(opts ...googleapi.CallOption) (*Buckets, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &Buckets{ + ret := &BucketAccessControls{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -4377,117 +4725,66 @@ func (c *BucketsListCall) Do(opts ...googleapi.CallOption) (*Buckets, error) { } return ret, nil // { - // "description": "Retrieves a list of buckets for a given project.", + // "description": "Retrieves ACL entries on the specified bucket.", // "httpMethod": "GET", - // "id": "storage.buckets.list", + // "id": "storage.bucketAccessControls.list", // "parameterOrder": [ - // "project" + // "bucket" // ], // "parameters": { - // "maxResults": { - // "default": "1000", - // "description": "Maximum number of buckets to return in a single response. The service will use this parameter or 1,000 items, whichever is smaller.", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "pageToken": { - // "description": "A previously-returned page token representing part of the larger set of results to view.", - // "location": "query", - // "type": "string" - // }, - // "prefix": { - // "description": "Filter results to buckets whose names begin with this prefix.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "A valid API project identifier.", - // "location": "query", + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", // "required": true, // "type": "string" // }, - // "projection": { - // "description": "Set of properties to return. Defaults to noAcl.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit owner, acl and defaultObjectAcl properties." - // ], - // "location": "query", - // "type": "string" - // }, // "userProject": { - // "description": "The project to be billed for this request.", + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", // "type": "string" // } // }, - // "path": "b", + // "path": "b/{bucket}/acl", // "response": { - // "$ref": "Buckets" + // "$ref": "BucketAccessControls" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" + // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *BucketsListCall) Pages(ctx context.Context, f func(*Buckets) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "storage.buckets.lockRetentionPolicy": +// method id "storage.bucketAccessControls.patch": -type BucketsLockRetentionPolicyCall struct { - s *Service - bucket string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type BucketAccessControlsPatchCall struct { + s *Service + bucket string + entity string + bucketaccesscontrol *BucketAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// LockRetentionPolicy: Locks retention policy on a bucket. +// Patch: Patches an ACL entry on the specified bucket. // // - bucket: Name of a bucket. -// - ifMetagenerationMatch: Makes the operation conditional on whether -// bucket's current metageneration matches the given value. -func (r *BucketsService) LockRetentionPolicy(bucket string, ifMetagenerationMatch int64) *BucketsLockRetentionPolicyCall { - c := &BucketsLockRetentionPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. +func (r *BucketAccessControlsService) Patch(bucket string, entity string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsPatchCall { + c := &BucketAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + c.entity = entity + c.bucketaccesscontrol = bucketaccesscontrol return c } // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. -func (c *BucketsLockRetentionPolicyCall) UserProject(userProject string) *BucketsLockRetentionPolicyCall { +func (c *BucketAccessControlsPatchCall) UserProject(userProject string) *BucketAccessControlsPatchCall { c.urlParams_.Set("userProject", userProject) return c } @@ -4495,7 +4792,7 @@ func (c *BucketsLockRetentionPolicyCall) UserProject(userProject string) *Bucket // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BucketsLockRetentionPolicyCall) Fields(s ...googleapi.Field) *BucketsLockRetentionPolicyCall { +func (c *BucketAccessControlsPatchCall) Fields(s ...googleapi.Field) *BucketAccessControlsPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -4503,21 +4800,21 @@ func (c *BucketsLockRetentionPolicyCall) Fields(s ...googleapi.Field) *BucketsLo // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BucketsLockRetentionPolicyCall) Context(ctx context.Context) *BucketsLockRetentionPolicyCall { +func (c *BucketAccessControlsPatchCall) Context(ctx context.Context) *BucketAccessControlsPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *BucketsLockRetentionPolicyCall) Header() http.Header { +func (c *BucketAccessControlsPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BucketsLockRetentionPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *BucketAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -4525,29 +4822,35 @@ func (c *BucketsLockRetentionPolicyCall) doRequest(alt string) (*http.Response, } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/lockRetentionPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, + "entity": c.entity, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.buckets.lockRetentionPolicy" call. -// Exactly one of *Bucket or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Bucket.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *BucketsLockRetentionPolicyCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { +// Do executes the "storage.bucketAccessControls.patch" call. +// Exactly one of *BucketAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *BucketAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BucketAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -4566,7 +4869,7 @@ func (c *BucketsLockRetentionPolicyCall) Do(opts ...googleapi.CallOption) (*Buck if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &Bucket{ + ret := &BucketAccessControl{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -4578,12 +4881,12 @@ func (c *BucketsLockRetentionPolicyCall) Do(opts ...googleapi.CallOption) (*Buck } return ret, nil // { - // "description": "Locks retention policy on a bucket.", - // "httpMethod": "POST", - // "id": "storage.buckets.lockRetentionPolicy", + // "description": "Patches an ACL entry on the specified bucket.", + // "httpMethod": "PATCH", + // "id": "storage.bucketAccessControls.patch", // "parameterOrder": [ // "bucket", - // "ifMetagenerationMatch" + // "entity" // ], // "parameters": { // "bucket": { @@ -4592,10 +4895,9 @@ func (c *BucketsLockRetentionPolicyCall) Do(opts ...googleapi.CallOption) (*Buck // "required": true, // "type": "string" // }, - // "ifMetagenerationMatch": { - // "description": "Makes the operation conditional on whether bucket's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", // "required": true, // "type": "string" // }, @@ -4605,132 +4907,50 @@ func (c *BucketsLockRetentionPolicyCall) Do(opts ...googleapi.CallOption) (*Buck // "type": "string" // } // }, - // "path": "b/{bucket}/lockRetentionPolicy", + // "path": "b/{bucket}/acl/{entity}", + // "request": { + // "$ref": "BucketAccessControl" + // }, // "response": { - // "$ref": "Bucket" + // "$ref": "BucketAccessControl" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" + // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } } -// method id "storage.buckets.patch": +// method id "storage.bucketAccessControls.update": -type BucketsPatchCall struct { - s *Service - bucket string - bucket2 *Bucket - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type BucketAccessControlsUpdateCall struct { + s *Service + bucket string + entity string + bucketaccesscontrol *BucketAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Patches a bucket. Changes to the bucket will be readable -// immediately after writing, but configuration changes may take time to -// propagate. +// Update: Updates an ACL entry on the specified bucket. // -// - bucket: Name of a bucket. -func (r *BucketsService) Patch(bucket string, bucket2 *Bucket) *BucketsPatchCall { - c := &BucketsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. 
+func (r *BucketAccessControlsService) Update(bucket string, entity string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsUpdateCall { + c := &BucketAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket - c.bucket2 = bucket2 - return c -} - -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the return of the bucket metadata -// conditional on whether the bucket's current metageneration matches -// the given value. -func (c *BucketsPatchCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsPatchCall { - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) - return c -} - -// IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": Makes the return of the bucket metadata -// conditional on whether the bucket's current metageneration does not -// match the given value. -func (c *BucketsPatchCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsPatchCall { - c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) - return c -} - -// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a -// predefined set of access controls to this bucket. -// -// Possible values: -// -// "authenticatedRead" - Project team owners get OWNER access, and -// -// allAuthenticatedUsers get READER access. -// -// "private" - Project team owners get OWNER access. -// "projectPrivate" - Project team members get access according to -// -// their roles. -// -// "publicRead" - Project team owners get OWNER access, and allUsers -// -// get READER access. -// -// "publicReadWrite" - Project team owners get OWNER access, and -// -// allUsers get WRITER access. -func (c *BucketsPatchCall) PredefinedAcl(predefinedAcl string) *BucketsPatchCall { - c.urlParams_.Set("predefinedAcl", predefinedAcl) - return c -} - -// PredefinedDefaultObjectAcl sets the optional parameter -// "predefinedDefaultObjectAcl": Apply a predefined set of default -// object access controls to this bucket. -// -// Possible values: -// -// "authenticatedRead" - Object owner gets OWNER access, and -// -// allAuthenticatedUsers get READER access. -// -// "bucketOwnerFullControl" - Object owner gets OWNER access, and -// -// project team owners get OWNER access. -// -// "bucketOwnerRead" - Object owner gets OWNER access, and project -// -// team owners get READER access. -// -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team -// -// members get access according to their roles. -// -// "publicRead" - Object owner gets OWNER access, and allUsers get -// -// READER access. -func (c *BucketsPatchCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsPatchCall { - c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl) - return c -} - -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to full. -// -// Possible values: -// -// "full" - Include all properties. -// "noAcl" - Omit owner, acl and defaultObjectAcl properties. -func (c *BucketsPatchCall) Projection(projection string) *BucketsPatchCall { - c.urlParams_.Set("projection", projection) + c.entity = entity + c.bucketaccesscontrol = bucketaccesscontrol return c } // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. 
-func (c *BucketsPatchCall) UserProject(userProject string) *BucketsPatchCall { +func (c *BucketAccessControlsUpdateCall) UserProject(userProject string) *BucketAccessControlsUpdateCall { c.urlParams_.Set("userProject", userProject) return c } @@ -4738,7 +4958,7 @@ func (c *BucketsPatchCall) UserProject(userProject string) *BucketsPatchCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BucketsPatchCall) Fields(s ...googleapi.Field) *BucketsPatchCall { +func (c *BucketAccessControlsUpdateCall) Fields(s ...googleapi.Field) *BucketAccessControlsUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -4746,21 +4966,21 @@ func (c *BucketsPatchCall) Fields(s ...googleapi.Field) *BucketsPatchCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BucketsPatchCall) Context(ctx context.Context) *BucketsPatchCall { +func (c *BucketAccessControlsUpdateCall) Context(ctx context.Context) *BucketAccessControlsUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BucketsPatchCall) Header() http.Header { +func (c *BucketAccessControlsUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BucketsPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *BucketAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -4768,34 +4988,35 @@ func (c *BucketsPatchCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("PUT", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, + "entity": c.entity, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.buckets.patch" call. -// Exactly one of *Bucket or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Bucket.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *BucketsPatchCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { +// Do executes the "storage.bucketAccessControls.update" call. +// Exactly one of *BucketAccessControl or error will be non-nil. Any +// non-2xx status code is an error. 
Response headers are in either +// *BucketAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BucketAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -4814,7 +5035,7 @@ func (c *BucketsPatchCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &Bucket{ + ret := &BucketAccessControl{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -4826,11 +5047,12 @@ func (c *BucketsPatchCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { } return ret, nil // { - // "description": "Patches a bucket. Changes to the bucket will be readable immediately after writing, but configuration changes may take time to propagate.", - // "httpMethod": "PATCH", - // "id": "storage.buckets.patch", + // "description": "Updates an ACL entry on the specified bucket.", + // "httpMethod": "PUT", + // "id": "storage.bucketAccessControls.update", // "parameterOrder": [ - // "bucket" + // "bucket", + // "entity" // ], // "parameters": { // "bucket": { @@ -4839,69 +5061,10 @@ func (c *BucketsPatchCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { // "required": true, // "type": "string" // }, - // "ifMetagenerationMatch": { - // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "predefinedAcl": { - // "description": "Apply a predefined set of access controls to this bucket.", - // "enum": [ - // "authenticatedRead", - // "private", - // "projectPrivate", - // "publicRead", - // "publicReadWrite" - // ], - // "enumDescriptions": [ - // "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", - // "Project team owners get OWNER access.", - // "Project team members get access according to their roles.", - // "Project team owners get OWNER access, and allUsers get READER access.", - // "Project team owners get OWNER access, and allUsers get WRITER access." 
- // ], - // "location": "query", - // "type": "string" - // }, - // "predefinedDefaultObjectAcl": { - // "description": "Apply a predefined set of default object access controls to this bucket.", - // "enum": [ - // "authenticatedRead", - // "bucketOwnerFullControl", - // "bucketOwnerRead", - // "private", - // "projectPrivate", - // "publicRead" - // ], - // "enumDescriptions": [ - // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - // "Object owner gets OWNER access, and project team owners get OWNER access.", - // "Object owner gets OWNER access, and project team owners get READER access.", - // "Object owner gets OWNER access.", - // "Object owner gets OWNER access, and project team members get access according to their roles.", - // "Object owner gets OWNER access, and allUsers get READER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. Defaults to full.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit owner, acl and defaultObjectAcl properties." - // ], - // "location": "query", + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, // "type": "string" // }, // "userProject": { @@ -4910,12 +5073,12 @@ func (c *BucketsPatchCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { // "type": "string" // } // }, - // "path": "b/{bucket}", + // "path": "b/{bucket}/acl/{entity}", // "request": { - // "$ref": "Bucket" + // "$ref": "BucketAccessControl" // }, // "response": { - // "$ref": "Bucket" + // "$ref": "BucketAccessControl" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -4925,30 +5088,44 @@ func (c *BucketsPatchCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { } -// method id "storage.buckets.setIamPolicy": +// method id "storage.buckets.delete": -type BucketsSetIamPolicyCall struct { +type BucketsDeleteCall struct { s *Service bucket string - policy *Policy urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// SetIamPolicy: Updates an IAM policy for the specified bucket. +// Delete: Permanently deletes an empty bucket. // // - bucket: Name of a bucket. -func (r *BucketsService) SetIamPolicy(bucket string, policy *Policy) *BucketsSetIamPolicyCall { - c := &BucketsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *BucketsService) Delete(bucket string) *BucketsDeleteCall { + c := &BucketsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket - c.policy = policy + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": If set, only deletes the bucket if its +// metageneration matches this value. +func (c *BucketsDeleteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsDeleteCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": If set, only deletes the bucket if its +// metageneration does not match this value. 
+func (c *BucketsDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsDeleteCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) return c } // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. -func (c *BucketsSetIamPolicyCall) UserProject(userProject string) *BucketsSetIamPolicyCall { +func (c *BucketsDeleteCall) UserProject(userProject string) *BucketsDeleteCall { c.urlParams_.Set("userProject", userProject) return c } @@ -4956,7 +5133,7 @@ func (c *BucketsSetIamPolicyCall) UserProject(userProject string) *BucketsSetIam // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BucketsSetIamPolicyCall) Fields(s ...googleapi.Field) *BucketsSetIamPolicyCall { +func (c *BucketsDeleteCall) Fields(s ...googleapi.Field) *BucketsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -4964,21 +5141,21 @@ func (c *BucketsSetIamPolicyCall) Fields(s ...googleapi.Field) *BucketsSetIamPol // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BucketsSetIamPolicyCall) Context(ctx context.Context) *BucketsSetIamPolicyCall { +func (c *BucketsDeleteCall) Context(ctx context.Context) *BucketsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BucketsSetIamPolicyCall) Header() http.Header { +func (c *BucketsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BucketsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *BucketsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -4986,16 +5163,11 @@ func (c *BucketsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/iam") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } @@ -5006,47 +5178,22 @@ func (c *BucketsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.buckets.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *BucketsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "storage.buckets.delete" call. 
+func (c *BucketsDeleteCall) Do(opts ...googleapi.CallOption) error { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } if err != nil { - return nil, err + return err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &Policy{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err + return gensupport.WrapError(err) } - return ret, nil + return nil // { - // "description": "Updates an IAM policy for the specified bucket.", - // "httpMethod": "PUT", - // "id": "storage.buckets.setIamPolicy", + // "description": "Permanently deletes an empty bucket.", + // "httpMethod": "DELETE", + // "id": "storage.buckets.delete", // "parameterOrder": [ // "bucket" // ], @@ -5057,30 +5204,37 @@ func (c *BucketsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err // "required": true, // "type": "string" // }, + // "ifMetagenerationMatch": { + // "description": "If set, only deletes the bucket if its metageneration matches this value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "If set, only deletes the bucket if its metageneration does not match this value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", // "type": "string" // } // }, - // "path": "b/{bucket}/iam", - // "request": { - // "$ref": "Policy" - // }, - // "response": { - // "$ref": "Policy" - // }, + // "path": "b/{bucket}", // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } -// method id "storage.buckets.testIamPermissions": +// method id "storage.buckets.get": -type BucketsTestIamPermissionsCall struct { +type BucketsGetCall struct { s *Service bucket string urlParams_ gensupport.URLParams @@ -5089,21 +5243,48 @@ type BucketsTestIamPermissionsCall struct { header_ http.Header } -// TestIamPermissions: Tests a set of permissions on the given bucket to -// see which, if any, are held by the caller. +// Get: Returns metadata for the specified bucket. // // - bucket: Name of a bucket. -// - permissions: Permissions to test. -func (r *BucketsService) TestIamPermissions(bucket string, permissions []string) *BucketsTestIamPermissionsCall { - c := &BucketsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *BucketsService) Get(bucket string) *BucketsGetCall { + c := &BucketsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket - c.urlParams_.SetMulti("permissions", append([]string{}, permissions...)) + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the return of the bucket metadata +// conditional on whether the bucket's current metageneration matches +// the given value. 
+func (c *BucketsGetCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsGetCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the return of the bucket metadata +// conditional on whether the bucket's current metageneration does not +// match the given value. +func (c *BucketsGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsGetCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to noAcl. +// +// Possible values: +// +// "full" - Include all properties. +// "noAcl" - Omit owner, acl and defaultObjectAcl properties. +func (c *BucketsGetCall) Projection(projection string) *BucketsGetCall { + c.urlParams_.Set("projection", projection) return c } // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. -func (c *BucketsTestIamPermissionsCall) UserProject(userProject string) *BucketsTestIamPermissionsCall { +func (c *BucketsGetCall) UserProject(userProject string) *BucketsGetCall { c.urlParams_.Set("userProject", userProject) return c } @@ -5111,7 +5292,7 @@ func (c *BucketsTestIamPermissionsCall) UserProject(userProject string) *Buckets // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BucketsTestIamPermissionsCall) Fields(s ...googleapi.Field) *BucketsTestIamPermissionsCall { +func (c *BucketsGetCall) Fields(s ...googleapi.Field) *BucketsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -5121,7 +5302,7 @@ func (c *BucketsTestIamPermissionsCall) Fields(s ...googleapi.Field) *BucketsTes // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *BucketsTestIamPermissionsCall) IfNoneMatch(entityTag string) *BucketsTestIamPermissionsCall { +func (c *BucketsGetCall) IfNoneMatch(entityTag string) *BucketsGetCall { c.ifNoneMatch_ = entityTag return c } @@ -5129,21 +5310,21 @@ func (c *BucketsTestIamPermissionsCall) IfNoneMatch(entityTag string) *BucketsTe // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BucketsTestIamPermissionsCall) Context(ctx context.Context) *BucketsTestIamPermissionsCall { +func (c *BucketsGetCall) Context(ctx context.Context) *BucketsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *BucketsTestIamPermissionsCall) Header() http.Header { +func (c *BucketsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BucketsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *BucketsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -5156,7 +5337,7 @@ func (c *BucketsTestIamPermissionsCall) doRequest(alt string) (*http.Response, e var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/iam/testPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -5169,14 +5350,14 @@ func (c *BucketsTestIamPermissionsCall) doRequest(alt string) (*http.Response, e return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.buckets.testIamPermissions" call. -// Exactly one of *TestIamPermissionsResponse or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *TestIamPermissionsResponse.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *BucketsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) { +// Do executes the "storage.buckets.get" call. +// Exactly one of *Bucket or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Bucket.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *BucketsGetCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -5195,7 +5376,7 @@ func (c *BucketsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestI if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &TestIamPermissionsResponse{ + ret := &Bucket{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -5207,12 +5388,11 @@ func (c *BucketsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestI } return ret, nil // { - // "description": "Tests a set of permissions on the given bucket to see which, if any, are held by the caller.", + // "description": "Returns metadata for the specified bucket.", // "httpMethod": "GET", - // "id": "storage.buckets.testIamPermissions", + // "id": "storage.buckets.get", // "parameterOrder": [ - // "bucket", - // "permissions" + // "bucket" // ], // "parameters": { // "bucket": { @@ -5221,11 +5401,29 @@ func (c *BucketsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestI // "required": true, // "type": "string" // }, - // "permissions": { - // "description": "Permissions to test.", + // "ifMetagenerationMatch": { + // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to noAcl.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit owner, acl and defaultObjectAcl properties." + // ], // "location": "query", - // "repeated": true, - // "required": true, // "type": "string" // }, // "userProject": { @@ -5234,9 +5432,9 @@ func (c *BucketsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestI // "type": "string" // } // }, - // "path": "b/{bucket}/iam/testPermissions", + // "path": "b/{bucket}", // "response": { - // "$ref": "TestIamPermissionsResponse" + // "$ref": "Bucket" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -5249,119 +5447,39 @@ func (c *BucketsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestI } -// method id "storage.buckets.update": +// method id "storage.buckets.getIamPolicy": -type BucketsUpdateCall struct { - s *Service - bucket string - bucket2 *Bucket - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type BucketsGetIamPolicyCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Update: Updates a bucket. Changes to the bucket will be readable -// immediately after writing, but configuration changes may take time to -// propagate. +// GetIamPolicy: Returns an IAM policy for the specified bucket. // // - bucket: Name of a bucket. 
-func (r *BucketsService) Update(bucket string, bucket2 *Bucket) *BucketsUpdateCall { - c := &BucketsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *BucketsService) GetIamPolicy(bucket string) *BucketsGetIamPolicyCall { + c := &BucketsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket - c.bucket2 = bucket2 - return c -} - -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the return of the bucket metadata -// conditional on whether the bucket's current metageneration matches -// the given value. -func (c *BucketsUpdateCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsUpdateCall { - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) - return c -} - -// IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": Makes the return of the bucket metadata -// conditional on whether the bucket's current metageneration does not -// match the given value. -func (c *BucketsUpdateCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsUpdateCall { - c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) - return c -} - -// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a -// predefined set of access controls to this bucket. -// -// Possible values: -// -// "authenticatedRead" - Project team owners get OWNER access, and -// -// allAuthenticatedUsers get READER access. -// -// "private" - Project team owners get OWNER access. -// "projectPrivate" - Project team members get access according to -// -// their roles. -// -// "publicRead" - Project team owners get OWNER access, and allUsers -// -// get READER access. -// -// "publicReadWrite" - Project team owners get OWNER access, and -// -// allUsers get WRITER access. -func (c *BucketsUpdateCall) PredefinedAcl(predefinedAcl string) *BucketsUpdateCall { - c.urlParams_.Set("predefinedAcl", predefinedAcl) - return c -} - -// PredefinedDefaultObjectAcl sets the optional parameter -// "predefinedDefaultObjectAcl": Apply a predefined set of default -// object access controls to this bucket. -// -// Possible values: -// -// "authenticatedRead" - Object owner gets OWNER access, and -// -// allAuthenticatedUsers get READER access. -// -// "bucketOwnerFullControl" - Object owner gets OWNER access, and -// -// project team owners get OWNER access. -// -// "bucketOwnerRead" - Object owner gets OWNER access, and project -// -// team owners get READER access. -// -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team -// -// members get access according to their roles. -// -// "publicRead" - Object owner gets OWNER access, and allUsers get -// -// READER access. -func (c *BucketsUpdateCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsUpdateCall { - c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl) return c } -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to full. -// -// Possible values: -// -// "full" - Include all properties. -// "noAcl" - Omit owner, acl and defaultObjectAcl properties. -func (c *BucketsUpdateCall) Projection(projection string) *BucketsUpdateCall { - c.urlParams_.Set("projection", projection) +// OptionsRequestedPolicyVersion sets the optional parameter +// "optionsRequestedPolicyVersion": The IAM policy format version to be +// returned. 
If the optionsRequestedPolicyVersion is for an older +// version that doesn't support part of the requested IAM policy, the +// request fails. +func (c *BucketsGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *BucketsGetIamPolicyCall { + c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) return c } // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. -func (c *BucketsUpdateCall) UserProject(userProject string) *BucketsUpdateCall { +func (c *BucketsGetIamPolicyCall) UserProject(userProject string) *BucketsGetIamPolicyCall { c.urlParams_.Set("userProject", userProject) return c } @@ -5369,46 +5487,54 @@ func (c *BucketsUpdateCall) UserProject(userProject string) *BucketsUpdateCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BucketsUpdateCall) Fields(s ...googleapi.Field) *BucketsUpdateCall { +func (c *BucketsGetIamPolicyCall) Fields(s ...googleapi.Field) *BucketsGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *BucketsGetIamPolicyCall) IfNoneMatch(entityTag string) *BucketsGetIamPolicyCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BucketsUpdateCall) Context(ctx context.Context) *BucketsUpdateCall { +func (c *BucketsGetIamPolicyCall) Context(ctx context.Context) *BucketsGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BucketsUpdateCall) Header() http.Header { +func (c *BucketsGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BucketsUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *BucketsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/iam") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } @@ -5419,14 +5545,14 @@ func (c *BucketsUpdateCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.buckets.update" call. -// Exactly one of *Bucket or error will be non-nil. Any non-2xx status +// Do executes the "storage.buckets.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either -// *Bucket.ServerResponse.Header or (if a response was returned at all) +// *Policy.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. -func (c *BucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { +func (c *BucketsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -5445,7 +5571,7 @@ func (c *BucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &Bucket{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -5457,9 +5583,9 @@ func (c *BucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { } return ret, nil // { - // "description": "Updates a bucket. Changes to the bucket will be readable immediately after writing, but configuration changes may take time to propagate.", - // "httpMethod": "PUT", - // "id": "storage.buckets.update", + // "description": "Returns an IAM policy for the specified bucket.", + // "httpMethod": "GET", + // "id": "storage.buckets.getIamPolicy", // "parameterOrder": [ // "bucket" // ], @@ -5470,83 +5596,22 @@ func (c *BucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { // "required": true, // "type": "string" // }, - // "ifMetagenerationMatch": { - // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", - // "format": "int64", + // "optionsRequestedPolicyVersion": { + // "description": "The IAM policy format version to be returned. 
If the optionsRequestedPolicyVersion is for an older version that doesn't support part of the requested IAM policy, the request fails.", + // "format": "int32", // "location": "query", - // "type": "string" + // "minimum": "1", + // "type": "integer" // }, - // "predefinedAcl": { - // "description": "Apply a predefined set of access controls to this bucket.", - // "enum": [ - // "authenticatedRead", - // "private", - // "projectPrivate", - // "publicRead", - // "publicReadWrite" - // ], - // "enumDescriptions": [ - // "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", - // "Project team owners get OWNER access.", - // "Project team members get access according to their roles.", - // "Project team owners get OWNER access, and allUsers get READER access.", - // "Project team owners get OWNER access, and allUsers get WRITER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "predefinedDefaultObjectAcl": { - // "description": "Apply a predefined set of default object access controls to this bucket.", - // "enum": [ - // "authenticatedRead", - // "bucketOwnerFullControl", - // "bucketOwnerRead", - // "private", - // "projectPrivate", - // "publicRead" - // ], - // "enumDescriptions": [ - // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - // "Object owner gets OWNER access, and project team owners get OWNER access.", - // "Object owner gets OWNER access, and project team owners get READER access.", - // "Object owner gets OWNER access.", - // "Object owner gets OWNER access, and project team members get access according to their roles.", - // "Object owner gets OWNER access, and allUsers get READER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. Defaults to full.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit owner, acl and defaultObjectAcl properties." - // ], - // "location": "query", - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", // "type": "string" // } // }, - // "path": "b/{bucket}", - // "request": { - // "$ref": "Bucket" - // }, + // "path": "b/{bucket}/iam", // "response": { - // "$ref": "Bucket" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -5556,133 +5621,108 @@ func (c *BucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { } -// method id "storage.channels.stop": +// method id "storage.buckets.insert": -type ChannelsStopCall struct { +type BucketsInsertCall struct { s *Service - channel *Channel + bucket *Bucket urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Stop: Stop watching resources through this channel -func (r *ChannelsService) Stop(channel *Channel) *ChannelsStopCall { - c := &ChannelsStopCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.channel = channel +// Insert: Creates a new bucket. +// +// - project: A valid API project identifier. 
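// Editor's illustrative sketch, not part of the generated storage-gen.go
// surface: reading a bucket's IAM policy with the BucketsGetIamPolicyCall
// added above. Written as if it lived in this package so Service and Policy
// are in scope; the function name and bucket name are assumptions, and only
// calls shown in this diff (GetIamPolicy, OptionsRequestedPolicyVersion,
// Context, Do) are used.
func exampleGetBucketIamPolicy(ctx context.Context, svc *Service) (*Policy, error) {
	// Requesting policy version 3 avoids the failure mode described above,
	// where an older requested version cannot express parts of the policy
	// (for example, conditional role bindings).
	return svc.Buckets.GetIamPolicy("example-bucket").
		OptionsRequestedPolicyVersion(3).
		Context(ctx).
		Do()
}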
+func (r *BucketsService) Insert(projectid string, bucket *Bucket) *BucketsInsertCall { + c := &BucketsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.urlParams_.Set("project", projectid) + c.bucket = bucket return c } -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ChannelsStopCall) Fields(s ...googleapi.Field) *ChannelsStopCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) +// EnableObjectRetention sets the optional parameter +// "enableObjectRetention": When set to true, object retention is +// enabled for this bucket. +func (c *BucketsInsertCall) EnableObjectRetention(enableObjectRetention bool) *BucketsInsertCall { + c.urlParams_.Set("enableObjectRetention", fmt.Sprint(enableObjectRetention)) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ChannelsStopCall) Context(ctx context.Context) *ChannelsStopCall { - c.ctx_ = ctx +// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a +// predefined set of access controls to this bucket. +// +// Possible values: +// +// "authenticatedRead" - Project team owners get OWNER access, and +// +// allAuthenticatedUsers get READER access. +// +// "private" - Project team owners get OWNER access. +// "projectPrivate" - Project team members get access according to +// +// their roles. +// +// "publicRead" - Project team owners get OWNER access, and allUsers +// +// get READER access. +// +// "publicReadWrite" - Project team owners get OWNER access, and +// +// allUsers get WRITER access. +func (c *BucketsInsertCall) PredefinedAcl(predefinedAcl string) *BucketsInsertCall { + c.urlParams_.Set("predefinedAcl", predefinedAcl) return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ChannelsStopCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ChannelsStopCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "channels/stop") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.channels.stop" call. -func (c *ChannelsStopCall) Do(opts ...googleapi.CallOption) error { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if err != nil { - return err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return gensupport.WrapError(err) - } - return nil - // { - // "description": "Stop watching resources through this channel", - // "httpMethod": "POST", - // "id": "storage.channels.stop", - // "path": "channels/stop", - // "request": { - // "$ref": "Channel", - // "parameterName": "resource" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - -} - -// method id "storage.defaultObjectAccessControls.delete": - -type DefaultObjectAccessControlsDeleteCall struct { - s *Service - bucket string - entity string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// PredefinedDefaultObjectAcl sets the optional parameter +// "predefinedDefaultObjectAcl": Apply a predefined set of default +// object access controls to this bucket. +// +// Possible values: +// +// "authenticatedRead" - Object owner gets OWNER access, and +// +// allAuthenticatedUsers get READER access. +// +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// +// project team owners get OWNER access. +// +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// +// team owners get READER access. +// +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// +// members get access according to their roles. +// +// "publicRead" - Object owner gets OWNER access, and allUsers get +// +// READER access. +func (c *BucketsInsertCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsInsertCall { + c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl) + return c } -// Delete: Permanently deletes the default object ACL entry for the -// specified entity on the specified bucket. +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to noAcl, unless the bucket resource +// specifies acl or defaultObjectAcl properties, when it defaults to +// full. // -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. -func (r *DefaultObjectAccessControlsService) Delete(bucket string, entity string) *DefaultObjectAccessControlsDeleteCall { - c := &DefaultObjectAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.entity = entity +// Possible values: +// +// "full" - Include all properties. +// "noAcl" - Omit owner, acl and defaultObjectAcl properties. +func (c *BucketsInsertCall) Projection(projection string) *BucketsInsertCall { + c.urlParams_.Set("projection", projection) return c } // UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *DefaultObjectAccessControlsDeleteCall) UserProject(userProject string) *DefaultObjectAccessControlsDeleteCall { +// be billed for this request. 
+func (c *BucketsInsertCall) UserProject(userProject string) *BucketsInsertCall { c.urlParams_.Set("userProject", userProject) return c } @@ -5690,7 +5730,7 @@ func (c *DefaultObjectAccessControlsDeleteCall) UserProject(userProject string) // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DefaultObjectAccessControlsDeleteCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsDeleteCall { +func (c *BucketsInsertCall) Fields(s ...googleapi.Field) *BucketsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -5698,21 +5738,21 @@ func (c *DefaultObjectAccessControlsDeleteCall) Fields(s ...googleapi.Field) *De // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DefaultObjectAccessControlsDeleteCall) Context(ctx context.Context) *DefaultObjectAccessControlsDeleteCall { +func (c *BucketsInsertCall) Context(ctx context.Context) *BucketsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DefaultObjectAccessControlsDeleteCall) Header() http.Header { +func (c *BucketsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DefaultObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *BucketsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -5720,99 +5760,212 @@ func (c *DefaultObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Res } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") + urls := googleapi.ResolveRelative(c.s.BasePath, "b") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "entity": c.entity, - }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.defaultObjectAccessControls.delete" call. -func (c *DefaultObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error { +// Do executes the "storage.buckets.insert" call. +// Exactly one of *Bucket or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Bucket.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *BucketsInsertCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } if err != nil { - return err + return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return gensupport.WrapError(err) + return nil, gensupport.WrapError(err) } - return nil - // { - // "description": "Permanently deletes the default object ACL entry for the specified entity on the specified bucket.", - // "httpMethod": "DELETE", - // "id": "storage.defaultObjectAccessControls.delete", - // "parameterOrder": [ - // "bucket", - // "entity" + ret := &Bucket{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a new bucket.", + // "httpMethod": "POST", + // "id": "storage.buckets.insert", + // "parameterOrder": [ + // "project" // ], // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, + // "enableObjectRetention": { + // "default": "false", + // "description": "When set to true, object retention is enabled for this bucket.", + // "location": "query", + // "type": "boolean" + // }, + // "predefinedAcl": { + // "description": "Apply a predefined set of access controls to this bucket.", + // "enum": [ + // "authenticatedRead", + // "private", + // "projectPrivate", + // "publicRead", + // "publicReadWrite" + // ], + // "enumDescriptions": [ + // "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", + // "Project team owners get OWNER access.", + // "Project team members get access according to their roles.", + // "Project team owners get OWNER access, and allUsers get READER access.", + // "Project team owners get OWNER access, and allUsers get WRITER access." + // ], + // "location": "query", // "type": "string" // }, - // "entity": { - // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", + // "predefinedDefaultObjectAcl": { + // "description": "Apply a predefined set of default object access controls to this bucket.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "A valid API project identifier.", + // "location": "query", // "required": true, // "type": "string" // }, + // "projection": { + // "description": "Set of properties to return. 
Defaults to noAcl, unless the bucket resource specifies acl or defaultObjectAcl properties, when it defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit owner, acl and defaultObjectAcl properties." + // ], + // "location": "query", + // "type": "string" + // }, // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "description": "The project to be billed for this request.", // "location": "query", // "type": "string" // } // }, - // "path": "b/{bucket}/defaultObjectAcl/{entity}", + // "path": "b", + // "request": { + // "$ref": "Bucket" + // }, + // "response": { + // "$ref": "Bucket" + // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } -// method id "storage.defaultObjectAccessControls.get": +// method id "storage.buckets.list": -type DefaultObjectAccessControlsGetCall struct { +type BucketsListCall struct { s *Service - bucket string - entity string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Get: Returns the default object ACL entry for the specified entity on -// the specified bucket. +// List: Retrieves a list of buckets for a given project. // -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. -func (r *DefaultObjectAccessControlsService) Get(bucket string, entity string) *DefaultObjectAccessControlsGetCall { - c := &DefaultObjectAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.entity = entity +// - project: A valid API project identifier. +func (r *BucketsService) List(projectid string) *BucketsListCall { + c := &BucketsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.urlParams_.Set("project", projectid) + return c +} + +// MaxResults sets the optional parameter "maxResults": Maximum number +// of buckets to return in a single response. The service will use this +// parameter or 1,000 items, whichever is smaller. +func (c *BucketsListCall) MaxResults(maxResults int64) *BucketsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": A +// previously-returned page token representing part of the larger set of +// results to view. +func (c *BucketsListCall) PageToken(pageToken string) *BucketsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Prefix sets the optional parameter "prefix": Filter results to +// buckets whose names begin with this prefix. +func (c *BucketsListCall) Prefix(prefix string) *BucketsListCall { + c.urlParams_.Set("prefix", prefix) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to noAcl. +// +// Possible values: +// +// "full" - Include all properties. +// "noAcl" - Omit owner, acl and defaultObjectAcl properties. +func (c *BucketsListCall) Projection(projection string) *BucketsListCall { + c.urlParams_.Set("projection", projection) return c } // UserProject sets the optional parameter "userProject": The project to -// be billed for this request. 
Required for Requester Pays buckets. -func (c *DefaultObjectAccessControlsGetCall) UserProject(userProject string) *DefaultObjectAccessControlsGetCall { +// be billed for this request. +func (c *BucketsListCall) UserProject(userProject string) *BucketsListCall { c.urlParams_.Set("userProject", userProject) return c } @@ -5820,7 +5973,7 @@ func (c *DefaultObjectAccessControlsGetCall) UserProject(userProject string) *De // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DefaultObjectAccessControlsGetCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsGetCall { +func (c *BucketsListCall) Fields(s ...googleapi.Field) *BucketsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -5830,7 +5983,7 @@ func (c *DefaultObjectAccessControlsGetCall) Fields(s ...googleapi.Field) *Defau // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *DefaultObjectAccessControlsGetCall) IfNoneMatch(entityTag string) *DefaultObjectAccessControlsGetCall { +func (c *BucketsListCall) IfNoneMatch(entityTag string) *BucketsListCall { c.ifNoneMatch_ = entityTag return c } @@ -5838,21 +5991,21 @@ func (c *DefaultObjectAccessControlsGetCall) IfNoneMatch(entityTag string) *Defa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DefaultObjectAccessControlsGetCall) Context(ctx context.Context) *DefaultObjectAccessControlsGetCall { +func (c *BucketsListCall) Context(ctx context.Context) *BucketsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DefaultObjectAccessControlsGetCall) Header() http.Header { +func (c *BucketsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DefaultObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *BucketsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -5865,28 +6018,24 @@ func (c *DefaultObjectAccessControlsGetCall) doRequest(alt string) (*http.Respon var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") + urls := googleapi.ResolveRelative(c.s.BasePath, "b") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "entity": c.entity, - }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.defaultObjectAccessControls.get" call. -// Exactly one of *ObjectAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ObjectAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. 
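// Editor's illustrative sketch, not part of the generated storage-gen.go
// surface: creating a bucket with the BucketsInsertCall added above, written
// as if it lived in this package. The project ID and bucket name are example
// values, Bucket.Name is assumed to be the bucket's name field, and
// PredefinedAcl and EnableObjectRetention are the setters shown in this diff.
func exampleCreateBucket(ctx context.Context, svc *Service) (*Bucket, error) {
	b := &Bucket{Name: "example-loki-chunks"} // assumed Bucket.Name field
	return svc.Buckets.Insert("example-project", b).
		PredefinedAcl("projectPrivate").
		EnableObjectRetention(false).
		Context(ctx).
		Do()
}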
-func (c *DefaultObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { +// Do executes the "storage.buckets.list" call. +// Exactly one of *Buckets or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Buckets.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *BucketsListCall) Do(opts ...googleapi.CallOption) (*Buckets, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -5905,7 +6054,7 @@ func (c *DefaultObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (* if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &ObjectAccessControl{ + ret := &Buckets{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -5917,69 +6066,117 @@ func (c *DefaultObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Returns the default object ACL entry for the specified entity on the specified bucket.", + // "description": "Retrieves a list of buckets for a given project.", // "httpMethod": "GET", - // "id": "storage.defaultObjectAccessControls.get", + // "id": "storage.buckets.list", // "parameterOrder": [ - // "bucket", - // "entity" + // "project" // ], // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, + // "maxResults": { + // "default": "1000", + // "description": "Maximum number of buckets to return in a single response. The service will use this parameter or 1,000 items, whichever is smaller.", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "A previously-returned page token representing part of the larger set of results to view.", + // "location": "query", // "type": "string" // }, - // "entity": { - // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", + // "prefix": { + // "description": "Filter results to buckets whose names begin with this prefix.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "A valid API project identifier.", + // "location": "query", // "required": true, // "type": "string" // }, + // "projection": { + // "description": "Set of properties to return. Defaults to noAcl.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit owner, acl and defaultObjectAcl properties." + // ], + // "location": "query", + // "type": "string" + // }, // "userProject": { - // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + // "description": "The project to be billed for this request.", // "location": "query", // "type": "string" // } // }, - // "path": "b/{bucket}/defaultObjectAcl/{entity}", + // "path": "b", // "response": { - // "$ref": "ObjectAccessControl" + // "$ref": "Buckets" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } -// method id "storage.defaultObjectAccessControls.insert": - -type DefaultObjectAccessControlsInsertCall struct { - s *Service - bucket string - objectaccesscontrol *ObjectAccessControl - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Insert: Creates a new default object ACL entry on the specified -// bucket. +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *BucketsListCall) Pages(ctx context.Context, f func(*Buckets) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "storage.buckets.lockRetentionPolicy": + +type BucketsLockRetentionPolicyCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// LockRetentionPolicy: Locks retention policy on a bucket. // -// - bucket: Name of a bucket. -func (r *DefaultObjectAccessControlsService) Insert(bucket string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsInsertCall { - c := &DefaultObjectAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - bucket: Name of a bucket. +// - ifMetagenerationMatch: Makes the operation conditional on whether +// bucket's current metageneration matches the given value. +func (r *BucketsService) LockRetentionPolicy(bucket string, ifMetagenerationMatch int64) *BucketsLockRetentionPolicyCall { + c := &BucketsLockRetentionPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket - c.objectaccesscontrol = objectaccesscontrol + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) return c } // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. -func (c *DefaultObjectAccessControlsInsertCall) UserProject(userProject string) *DefaultObjectAccessControlsInsertCall { +func (c *BucketsLockRetentionPolicyCall) UserProject(userProject string) *BucketsLockRetentionPolicyCall { c.urlParams_.Set("userProject", userProject) return c } @@ -5987,7 +6184,7 @@ func (c *DefaultObjectAccessControlsInsertCall) UserProject(userProject string) // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *DefaultObjectAccessControlsInsertCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsInsertCall { +func (c *BucketsLockRetentionPolicyCall) Fields(s ...googleapi.Field) *BucketsLockRetentionPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -5995,21 +6192,21 @@ func (c *DefaultObjectAccessControlsInsertCall) Fields(s ...googleapi.Field) *De // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DefaultObjectAccessControlsInsertCall) Context(ctx context.Context) *DefaultObjectAccessControlsInsertCall { +func (c *BucketsLockRetentionPolicyCall) Context(ctx context.Context) *BucketsLockRetentionPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DefaultObjectAccessControlsInsertCall) Header() http.Header { +func (c *BucketsLockRetentionPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DefaultObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *BucketsLockRetentionPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -6017,14 +6214,9 @@ func (c *DefaultObjectAccessControlsInsertCall) doRequest(alt string) (*http.Res } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/lockRetentionPolicy") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -6037,14 +6229,14 @@ func (c *DefaultObjectAccessControlsInsertCall) doRequest(alt string) (*http.Res return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.defaultObjectAccessControls.insert" call. -// Exactly one of *ObjectAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ObjectAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *DefaultObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { +// Do executes the "storage.buckets.lockRetentionPolicy" call. +// Exactly one of *Bucket or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Bucket.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *BucketsLockRetentionPolicyCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -6063,7 +6255,7 @@ func (c *DefaultObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &ObjectAccessControl{ + ret := &Bucket{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -6075,11 +6267,12 @@ func (c *DefaultObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Creates a new default object ACL entry on the specified bucket.", + // "description": "Locks retention policy on a bucket.", // "httpMethod": "POST", - // "id": "storage.defaultObjectAccessControls.insert", + // "id": "storage.buckets.lockRetentionPolicy", // "parameterOrder": [ - // "bucket" + // "bucket", + // "ifMetagenerationMatch" // ], // "parameters": { // "bucket": { @@ -6088,67 +6281,145 @@ func (c *DefaultObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) // "required": true, // "type": "string" // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether bucket's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "required": true, + // "type": "string" + // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", // "type": "string" // } // }, - // "path": "b/{bucket}/defaultObjectAcl", - // "request": { - // "$ref": "ObjectAccessControl" - // }, + // "path": "b/{bucket}/lockRetentionPolicy", // "response": { - // "$ref": "ObjectAccessControl" + // "$ref": "Bucket" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } -// method id "storage.defaultObjectAccessControls.list": +// method id "storage.buckets.patch": -type DefaultObjectAccessControlsListCall struct { - s *Service - bucket string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type BucketsPatchCall struct { + s *Service + bucket string + bucket2 *Bucket + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves default object ACL entries on the specified bucket. +// Patch: Patches a bucket. Changes to the bucket will be readable +// immediately after writing, but configuration changes may take time to +// propagate. // // - bucket: Name of a bucket. -func (r *DefaultObjectAccessControlsService) List(bucket string) *DefaultObjectAccessControlsListCall { - c := &DefaultObjectAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *BucketsService) Patch(bucket string, bucket2 *Bucket) *BucketsPatchCall { + c := &BucketsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket + c.bucket2 = bucket2 return c } // IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": If present, only return default ACL listing -// if the bucket's current metageneration matches this value. 
-func (c *DefaultObjectAccessControlsListCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *DefaultObjectAccessControlsListCall { +// "ifMetagenerationMatch": Makes the return of the bucket metadata +// conditional on whether the bucket's current metageneration matches +// the given value. +func (c *BucketsPatchCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsPatchCall { c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) return c } // IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": If present, only return default ACL -// listing if the bucket's current metageneration does not match the -// given value. -func (c *DefaultObjectAccessControlsListCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *DefaultObjectAccessControlsListCall { +// "ifMetagenerationNotMatch": Makes the return of the bucket metadata +// conditional on whether the bucket's current metageneration does not +// match the given value. +func (c *BucketsPatchCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsPatchCall { c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) return c } +// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a +// predefined set of access controls to this bucket. +// +// Possible values: +// +// "authenticatedRead" - Project team owners get OWNER access, and +// +// allAuthenticatedUsers get READER access. +// +// "private" - Project team owners get OWNER access. +// "projectPrivate" - Project team members get access according to +// +// their roles. +// +// "publicRead" - Project team owners get OWNER access, and allUsers +// +// get READER access. +// +// "publicReadWrite" - Project team owners get OWNER access, and +// +// allUsers get WRITER access. +func (c *BucketsPatchCall) PredefinedAcl(predefinedAcl string) *BucketsPatchCall { + c.urlParams_.Set("predefinedAcl", predefinedAcl) + return c +} + +// PredefinedDefaultObjectAcl sets the optional parameter +// "predefinedDefaultObjectAcl": Apply a predefined set of default +// object access controls to this bucket. +// +// Possible values: +// +// "authenticatedRead" - Object owner gets OWNER access, and +// +// allAuthenticatedUsers get READER access. +// +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// +// project team owners get OWNER access. +// +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// +// team owners get READER access. +// +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// +// members get access according to their roles. +// +// "publicRead" - Object owner gets OWNER access, and allUsers get +// +// READER access. +func (c *BucketsPatchCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsPatchCall { + c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to full. +// +// Possible values: +// +// "full" - Include all properties. +// "noAcl" - Omit owner, acl and defaultObjectAcl properties. +func (c *BucketsPatchCall) Projection(projection string) *BucketsPatchCall { + c.urlParams_.Set("projection", projection) + return c +} + // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. 
-func (c *DefaultObjectAccessControlsListCall) UserProject(userProject string) *DefaultObjectAccessControlsListCall { +func (c *BucketsPatchCall) UserProject(userProject string) *BucketsPatchCall { c.urlParams_.Set("userProject", userProject) return c } @@ -6156,54 +6427,46 @@ func (c *DefaultObjectAccessControlsListCall) UserProject(userProject string) *D // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DefaultObjectAccessControlsListCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsListCall { +func (c *BucketsPatchCall) Fields(s ...googleapi.Field) *BucketsPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *DefaultObjectAccessControlsListCall) IfNoneMatch(entityTag string) *DefaultObjectAccessControlsListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DefaultObjectAccessControlsListCall) Context(ctx context.Context) *DefaultObjectAccessControlsListCall { +func (c *BucketsPatchCall) Context(ctx context.Context) *BucketsPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DefaultObjectAccessControlsListCall) Header() http.Header { +func (c *BucketsPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DefaultObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) { +func (c *BucketsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } @@ -6214,18 +6477,18 @@ func (c *DefaultObjectAccessControlsListCall) doRequest(alt string) (*http.Respo return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.defaultObjectAccessControls.list" call. -// Exactly one of *ObjectAccessControls or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ObjectAccessControls.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. 
Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *DefaultObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControls, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { +// Do executes the "storage.buckets.patch" call. +// Exactly one of *Bucket or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Bucket.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *BucketsPatchCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { res.Body.Close() } return nil, gensupport.WrapError(&googleapi.Error{ @@ -6240,7 +6503,7 @@ func (c *DefaultObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) ( if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &ObjectAccessControls{ + ret := &Bucket{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -6252,9 +6515,9 @@ func (c *DefaultObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Retrieves default object ACL entries on the specified bucket.", - // "httpMethod": "GET", - // "id": "storage.defaultObjectAccessControls.list", + // "description": "Patches a bucket. Changes to the bucket will be readable immediately after writing, but configuration changes may take time to propagate.", + // "httpMethod": "PATCH", + // "id": "storage.buckets.patch", // "parameterOrder": [ // "bucket" // ], @@ -6266,26 +6529,82 @@ func (c *DefaultObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) ( // "type": "string" // }, // "ifMetagenerationMatch": { - // "description": "If present, only return default ACL listing if the bucket's current metageneration matches this value.", + // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifMetagenerationNotMatch": { - // "description": "If present, only return default ACL listing if the bucket's current metageneration does not match the given value.", + // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, + // "predefinedAcl": { + // "description": "Apply a predefined set of access controls to this bucket.", + // "enum": [ + // "authenticatedRead", + // "private", + // "projectPrivate", + // "publicRead", + // "publicReadWrite" + // ], + // "enumDescriptions": [ + // "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", + // "Project team owners get OWNER access.", + // "Project team members get access according to their roles.", + // "Project team owners get OWNER access, and allUsers get READER access.", + // "Project team owners get OWNER access, and allUsers get WRITER access." 
+ // ], + // "location": "query", + // "type": "string" + // }, + // "predefinedDefaultObjectAcl": { + // "description": "Apply a predefined set of default object access controls to this bucket.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit owner, acl and defaultObjectAcl properties." + // ], + // "location": "query", + // "type": "string" + // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", // "type": "string" // } // }, - // "path": "b/{bucket}/defaultObjectAcl", + // "path": "b/{bucket}", + // "request": { + // "$ref": "Bucket" + // }, // "response": { - // "$ref": "ObjectAccessControls" + // "$ref": "Bucket" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -6295,35 +6614,30 @@ func (c *DefaultObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) ( } -// method id "storage.defaultObjectAccessControls.patch": +// method id "storage.buckets.setIamPolicy": -type DefaultObjectAccessControlsPatchCall struct { - s *Service - bucket string - entity string - objectaccesscontrol *ObjectAccessControl - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type BucketsSetIamPolicyCall struct { + s *Service + bucket string + policy *Policy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Patches a default object ACL entry on the specified bucket. +// SetIamPolicy: Updates an IAM policy for the specified bucket. // -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. -func (r *DefaultObjectAccessControlsService) Patch(bucket string, entity string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsPatchCall { - c := &DefaultObjectAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - bucket: Name of a bucket. +func (r *BucketsService) SetIamPolicy(bucket string, policy *Policy) *BucketsSetIamPolicyCall { + c := &BucketsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket - c.entity = entity - c.objectaccesscontrol = objectaccesscontrol + c.policy = policy return c } // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. 
-func (c *DefaultObjectAccessControlsPatchCall) UserProject(userProject string) *DefaultObjectAccessControlsPatchCall { +func (c *BucketsSetIamPolicyCall) UserProject(userProject string) *BucketsSetIamPolicyCall { c.urlParams_.Set("userProject", userProject) return c } @@ -6331,7 +6645,7 @@ func (c *DefaultObjectAccessControlsPatchCall) UserProject(userProject string) * // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DefaultObjectAccessControlsPatchCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsPatchCall { +func (c *BucketsSetIamPolicyCall) Fields(s ...googleapi.Field) *BucketsSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -6339,21 +6653,21 @@ func (c *DefaultObjectAccessControlsPatchCall) Fields(s ...googleapi.Field) *Def // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DefaultObjectAccessControlsPatchCall) Context(ctx context.Context) *DefaultObjectAccessControlsPatchCall { +func (c *BucketsSetIamPolicyCall) Context(ctx context.Context) *BucketsSetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DefaultObjectAccessControlsPatchCall) Header() http.Header { +func (c *BucketsSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DefaultObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *BucketsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -6361,35 +6675,34 @@ func (c *DefaultObjectAccessControlsPatchCall) doRequest(alt string) (*http.Resp } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/iam") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("PUT", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, - "entity": c.entity, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.defaultObjectAccessControls.patch" call. -// Exactly one of *ObjectAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ObjectAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *DefaultObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { +// Do executes the "storage.buckets.setIamPolicy" call. 
+// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *BucketsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -6408,7 +6721,7 @@ func (c *DefaultObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &ObjectAccessControl{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -6420,12 +6733,11 @@ func (c *DefaultObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Patches a default object ACL entry on the specified bucket.", - // "httpMethod": "PATCH", - // "id": "storage.defaultObjectAccessControls.patch", + // "description": "Updates an IAM policy for the specified bucket.", + // "httpMethod": "PUT", + // "id": "storage.buckets.setIamPolicy", // "parameterOrder": [ - // "bucket", - // "entity" + // "bucket" // ], // "parameters": { // "bucket": { @@ -6434,24 +6746,18 @@ func (c *DefaultObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) // "required": true, // "type": "string" // }, - // "entity": { - // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", // "type": "string" // } // }, - // "path": "b/{bucket}/defaultObjectAcl/{entity}", + // "path": "b/{bucket}/iam", // "request": { - // "$ref": "ObjectAccessControl" + // "$ref": "Policy" // }, // "response": { - // "$ref": "ObjectAccessControl" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -6461,35 +6767,32 @@ func (c *DefaultObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) } -// method id "storage.defaultObjectAccessControls.update": +// method id "storage.buckets.testIamPermissions": -type DefaultObjectAccessControlsUpdateCall struct { - s *Service - bucket string - entity string - objectaccesscontrol *ObjectAccessControl - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type BucketsTestIamPermissionsCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Update: Updates a default object ACL entry on the specified bucket. +// TestIamPermissions: Tests a set of permissions on the given bucket to +// see which, if any, are held by the caller. // -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. 
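// Editor's illustration (not part of the vendored storage-gen.go diff above):
// a minimal sketch of how a caller might use the generated
// storage.buckets.setIamPolicy method. "example-bucket", the role, and the
// member are hypothetical placeholders; svc is assumed to come from
// storage.NewService, with "google.golang.org/api/storage/v1" imported as
// storage and "context" imported.
func grantObjectViewer(ctx context.Context, svc *storage.Service) error {
	// Read-modify-write: fetch the current policy, append a binding, then PUT
	// it back via Buckets.SetIamPolicy (which issues PUT b/{bucket}/iam).
	policy, err := svc.Buckets.GetIamPolicy("example-bucket").Context(ctx).Do()
	if err != nil {
		return err
	}
	policy.Bindings = append(policy.Bindings, &storage.PolicyBindings{
		Role:    "roles/storage.objectViewer",
		Members: []string{"allUsers"},
	})
	_, err = svc.Buckets.SetIamPolicy("example-bucket", policy).Context(ctx).Do()
	return err
}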
-func (r *DefaultObjectAccessControlsService) Update(bucket string, entity string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsUpdateCall { - c := &DefaultObjectAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - bucket: Name of a bucket. +// - permissions: Permissions to test. +func (r *BucketsService) TestIamPermissions(bucket string, permissions []string) *BucketsTestIamPermissionsCall { + c := &BucketsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket - c.entity = entity - c.objectaccesscontrol = objectaccesscontrol + c.urlParams_.SetMulti("permissions", append([]string{}, permissions...)) return c } // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. -func (c *DefaultObjectAccessControlsUpdateCall) UserProject(userProject string) *DefaultObjectAccessControlsUpdateCall { +func (c *BucketsTestIamPermissionsCall) UserProject(userProject string) *BucketsTestIamPermissionsCall { c.urlParams_.Set("userProject", userProject) return c } @@ -6497,65 +6800,72 @@ func (c *DefaultObjectAccessControlsUpdateCall) UserProject(userProject string) // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DefaultObjectAccessControlsUpdateCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsUpdateCall { +func (c *BucketsTestIamPermissionsCall) Fields(s ...googleapi.Field) *BucketsTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *BucketsTestIamPermissionsCall) IfNoneMatch(entityTag string) *BucketsTestIamPermissionsCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DefaultObjectAccessControlsUpdateCall) Context(ctx context.Context) *DefaultObjectAccessControlsUpdateCall { +func (c *BucketsTestIamPermissionsCall) Context(ctx context.Context) *BucketsTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *DefaultObjectAccessControlsUpdateCall) Header() http.Header { +func (c *BucketsTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DefaultObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *BucketsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/iam/testPermissions") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, - "entity": c.entity, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.defaultObjectAccessControls.update" call. -// Exactly one of *ObjectAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ObjectAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "storage.buckets.testIamPermissions" call. +// Exactly one of *TestIamPermissionsResponse or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *TestIamPermissionsResponse.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *DefaultObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { +func (c *BucketsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -6574,7 +6884,7 @@ func (c *DefaultObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &ObjectAccessControl{ + ret := &TestIamPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -6586,12 +6896,12 @@ func (c *DefaultObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Updates a default object ACL entry on the specified bucket.", - // "httpMethod": "PUT", - // "id": "storage.defaultObjectAccessControls.update", - // "parameterOrder": [ - // "bucket", - // "entity" + // "description": "Tests a set of permissions on the given bucket to see which, if any, are held by the caller.", + // "httpMethod": "GET", + // "id": "storage.buckets.testIamPermissions", + // "parameterOrder": [ + // "bucket", + // "permissions" // ], // "parameters": { // "bucket": { @@ -6600,9 +6910,10 @@ func (c *DefaultObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) // "required": true, // "type": "string" // }, - // "entity": { - // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", + // "permissions": { + // "description": "Permissions to test.", + // "location": "query", + // "repeated": true, // "required": true, // "type": "string" // }, @@ -6612,46 +6923,134 @@ func (c *DefaultObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) // "type": "string" // } // }, - // "path": "b/{bucket}/defaultObjectAcl/{entity}", - // "request": { - // "$ref": "ObjectAccessControl" - // }, + // "path": "b/{bucket}/iam/testPermissions", // "response": { - // "$ref": "ObjectAccessControl" + // "$ref": "TestIamPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } -// method id "storage.notifications.delete": +// method id "storage.buckets.update": -type NotificationsDeleteCall struct { - s *Service - bucket string - notification string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type BucketsUpdateCall struct { + s *Service + bucket string + bucket2 *Bucket + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Permanently deletes a notification subscription. +// Update: Updates a bucket. Changes to the bucket will be readable +// immediately after writing, but configuration changes may take time to +// propagate. // -// - bucket: The parent bucket of the notification. -// - notification: ID of the notification to delete. -func (r *NotificationsService) Delete(bucket string, notification string) *NotificationsDeleteCall { - c := &NotificationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - bucket: Name of a bucket. 
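// Editor's illustration (not part of the vendored diff): a hedged sketch of
// the generated storage.buckets.testIamPermissions call defined above. The
// bucket name and permission strings are placeholders; svc is assumed to come
// from storage.NewService ("google.golang.org/api/storage/v1").
func checkPermissions(ctx context.Context, svc *storage.Service) ([]string, error) {
	// GET b/{bucket}/iam/testPermissions with the candidate permissions sent
	// as repeated query parameters; the response lists the subset the caller
	// actually holds.
	resp, err := svc.Buckets.
		TestIamPermissions("example-bucket", []string{
			"storage.objects.get",
			"storage.objects.create",
		}).
		Context(ctx).
		Do()
	if err != nil {
		return nil, err
	}
	return resp.Permissions, nil
}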
+func (r *BucketsService) Update(bucket string, bucket2 *Bucket) *BucketsUpdateCall { + c := &BucketsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket - c.notification = notification + c.bucket2 = bucket2 + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the return of the bucket metadata +// conditional on whether the bucket's current metageneration matches +// the given value. +func (c *BucketsUpdateCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsUpdateCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the return of the bucket metadata +// conditional on whether the bucket's current metageneration does not +// match the given value. +func (c *BucketsUpdateCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsUpdateCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a +// predefined set of access controls to this bucket. +// +// Possible values: +// +// "authenticatedRead" - Project team owners get OWNER access, and +// +// allAuthenticatedUsers get READER access. +// +// "private" - Project team owners get OWNER access. +// "projectPrivate" - Project team members get access according to +// +// their roles. +// +// "publicRead" - Project team owners get OWNER access, and allUsers +// +// get READER access. +// +// "publicReadWrite" - Project team owners get OWNER access, and +// +// allUsers get WRITER access. +func (c *BucketsUpdateCall) PredefinedAcl(predefinedAcl string) *BucketsUpdateCall { + c.urlParams_.Set("predefinedAcl", predefinedAcl) + return c +} + +// PredefinedDefaultObjectAcl sets the optional parameter +// "predefinedDefaultObjectAcl": Apply a predefined set of default +// object access controls to this bucket. +// +// Possible values: +// +// "authenticatedRead" - Object owner gets OWNER access, and +// +// allAuthenticatedUsers get READER access. +// +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// +// project team owners get OWNER access. +// +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// +// team owners get READER access. +// +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// +// members get access according to their roles. +// +// "publicRead" - Object owner gets OWNER access, and allUsers get +// +// READER access. +func (c *BucketsUpdateCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsUpdateCall { + c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to full. +// +// Possible values: +// +// "full" - Include all properties. +// "noAcl" - Omit owner, acl and defaultObjectAcl properties. +func (c *BucketsUpdateCall) Projection(projection string) *BucketsUpdateCall { + c.urlParams_.Set("projection", projection) return c } // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. 
-func (c *NotificationsDeleteCall) UserProject(userProject string) *NotificationsDeleteCall { +func (c *BucketsUpdateCall) UserProject(userProject string) *BucketsUpdateCall { c.urlParams_.Set("userProject", userProject) return c } @@ -6659,7 +7058,7 @@ func (c *NotificationsDeleteCall) UserProject(userProject string) *Notifications // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NotificationsDeleteCall) Fields(s ...googleapi.Field) *NotificationsDeleteCall { +func (c *BucketsUpdateCall) Fields(s ...googleapi.Field) *BucketsUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -6667,21 +7066,21 @@ func (c *NotificationsDeleteCall) Fields(s ...googleapi.Field) *NotificationsDel // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NotificationsDeleteCall) Context(ctx context.Context) *NotificationsDeleteCall { +func (c *BucketsUpdateCall) Context(ctx context.Context) *BucketsUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NotificationsDeleteCall) Header() http.Header { +func (c *BucketsUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NotificationsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *BucketsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -6689,230 +7088,250 @@ func (c *NotificationsDeleteCall) doRequest(alt string) (*http.Response, error) } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs/{notification}") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("PUT", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "notification": c.notification, + "bucket": c.bucket, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.notifications.delete" call. -func (c *NotificationsDeleteCall) Do(opts ...googleapi.CallOption) error { +// Do executes the "storage.buckets.update" call. +// Exactly one of *Bucket or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Bucket.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *BucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } if err != nil { - return err + return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return gensupport.WrapError(err) + return nil, gensupport.WrapError(err) } - return nil + ret := &Bucket{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil // { - // "description": "Permanently deletes a notification subscription.", - // "httpMethod": "DELETE", - // "id": "storage.notifications.delete", + // "description": "Updates a bucket. Changes to the bucket will be readable immediately after writing, but configuration changes may take time to propagate.", + // "httpMethod": "PUT", + // "id": "storage.buckets.update", // "parameterOrder": [ - // "bucket", - // "notification" + // "bucket" // ], // "parameters": { // "bucket": { - // "description": "The parent bucket of the notification.", + // "description": "Name of a bucket.", // "location": "path", // "required": true, // "type": "string" // }, - // "notification": { - // "description": "ID of the notification to delete.", - // "location": "path", - // "required": true, + // "ifMetagenerationMatch": { + // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", // "type": "string" // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "ifMetagenerationNotMatch": { + // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", + // "format": "int64", // "location": "query", // "type": "string" - // } - // }, - // "path": "b/{bucket}/notificationConfigs/{notification}", - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - -} - -// method id "storage.notifications.get": - -type NotificationsGetCall struct { - s *Service - bucket string - notification string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: View a notification configuration. -// -// - bucket: The parent bucket of the notification. -// - notification: Notification ID. 
-func (r *NotificationsService) Get(bucket string, notification string) *NotificationsGetCall { - c := &NotificationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.notification = notification - return c + // }, + // "predefinedAcl": { + // "description": "Apply a predefined set of access controls to this bucket.", + // "enum": [ + // "authenticatedRead", + // "private", + // "projectPrivate", + // "publicRead", + // "publicReadWrite" + // ], + // "enumDescriptions": [ + // "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", + // "Project team owners get OWNER access.", + // "Project team members get access according to their roles.", + // "Project team owners get OWNER access, and allUsers get READER access.", + // "Project team owners get OWNER access, and allUsers get WRITER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "predefinedDefaultObjectAcl": { + // "description": "Apply a predefined set of default object access controls to this bucket.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit owner, acl and defaultObjectAcl properties." + // ], + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}", + // "request": { + // "$ref": "Bucket" + // }, + // "response": { + // "$ref": "Bucket" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *NotificationsGetCall) UserProject(userProject string) *NotificationsGetCall { - c.urlParams_.Set("userProject", userProject) +// method id "storage.channels.stop": + +type ChannelsStopCall struct { + s *Service + channel *Channel + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Stop: Stop watching resources through this channel +func (r *ChannelsService) Stop(channel *Channel) *ChannelsStopCall { + c := &ChannelsStopCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.channel = channel return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
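// Editor's illustration (not part of the vendored diff): a minimal sketch of
// the read-modify-write pattern for the generated storage.buckets.update call
// above, guarded with IfMetagenerationMatch so a concurrent metadata change
// fails the write instead of being silently overwritten. Names are
// placeholders; svc is assumed to come from storage.NewService
// ("google.golang.org/api/storage/v1").
func enableVersioning(ctx context.Context, svc *storage.Service) error {
	b, err := svc.Buckets.Get("example-bucket").Context(ctx).Do()
	if err != nil {
		return err
	}
	b.Versioning = &storage.BucketVersioning{Enabled: true}
	// Update replaces the full bucket resource (PUT b/{bucket}); the
	// metageneration precondition rejects the write if the bucket's metadata
	// changed since the Get above.
	_, err = svc.Buckets.Update("example-bucket", b).
		IfMetagenerationMatch(b.Metageneration).
		Context(ctx).
		Do()
	return err
}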
-func (c *NotificationsGetCall) Fields(s ...googleapi.Field) *NotificationsGetCall { +func (c *ChannelsStopCall) Fields(s ...googleapi.Field) *ChannelsStopCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *NotificationsGetCall) IfNoneMatch(entityTag string) *NotificationsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NotificationsGetCall) Context(ctx context.Context) *NotificationsGetCall { +func (c *ChannelsStopCall) Context(ctx context.Context) *ChannelsStopCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NotificationsGetCall) Header() http.Header { +func (c *ChannelsStopCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NotificationsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *ChannelsStopCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs/{notification}") + urls := googleapi.ResolveRelative(c.s.BasePath, "channels/stop") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "notification": c.notification, - }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.notifications.get" call. -// Exactly one of *Notification or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Notification.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *NotificationsGetCall) Do(opts ...googleapi.CallOption) (*Notification, error) { +// Do executes the "storage.channels.stop" call. +func (c *ChannelsStopCall) Do(opts ...googleapi.CallOption) error { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } if err != nil { - return nil, err + return err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &Notification{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err + return gensupport.WrapError(err) } - return ret, nil + return nil // { - // "description": "View a notification configuration.", - // "httpMethod": "GET", - // "id": "storage.notifications.get", - // "parameterOrder": [ - // "bucket", - // "notification" - // ], - // "parameters": { - // "bucket": { - // "description": "The parent bucket of the notification.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "notification": { - // "description": "Notification ID", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/notificationConfigs/{notification}", - // "response": { - // "$ref": "Notification" + // "description": "Stop watching resources through this channel", + // "httpMethod": "POST", + // "id": "storage.channels.stop", + // "path": "channels/stop", + // "request": { + // "$ref": "Channel", + // "parameterName": "resource" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -6925,30 +7344,34 @@ func (c *NotificationsGetCall) Do(opts ...googleapi.CallOption) (*Notification, } -// method id "storage.notifications.insert": +// method id "storage.defaultObjectAccessControls.delete": -type NotificationsInsertCall struct { - s *Service - bucket string - notification *Notification - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type DefaultObjectAccessControlsDeleteCall struct { + s *Service + bucket string + entity string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a notification subscription for a given bucket. +// Delete: Permanently deletes the default object ACL entry for the +// specified entity on the specified bucket. // -// - bucket: The parent bucket of the notification. -func (r *NotificationsService) Insert(bucket string, notification *Notification) *NotificationsInsertCall { - c := &NotificationsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. +func (r *DefaultObjectAccessControlsService) Delete(bucket string, entity string) *DefaultObjectAccessControlsDeleteCall { + c := &DefaultObjectAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket - c.notification = notification + c.entity = entity return c } // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. 
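// Editor's illustration (not part of the vendored diff): a hedged sketch of
// the generated storage.channels.stop call above, which POSTs the channel
// resource to channels/stop to cancel an object-change watch. The Id and
// ResourceId values are placeholders that would normally be taken from the
// original watch response; svc is assumed to come from storage.NewService.
func stopWatch(ctx context.Context, svc *storage.Service) error {
	return svc.Channels.Stop(&storage.Channel{
		Id:         "example-channel-id",
		ResourceId: "example-resource-id",
	}).Context(ctx).Do()
}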
-func (c *NotificationsInsertCall) UserProject(userProject string) *NotificationsInsertCall { +func (c *DefaultObjectAccessControlsDeleteCall) UserProject(userProject string) *DefaultObjectAccessControlsDeleteCall { c.urlParams_.Set("userProject", userProject) return c } @@ -6956,7 +7379,7 @@ func (c *NotificationsInsertCall) UserProject(userProject string) *Notifications // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NotificationsInsertCall) Fields(s ...googleapi.Field) *NotificationsInsertCall { +func (c *DefaultObjectAccessControlsDeleteCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -6964,21 +7387,21 @@ func (c *NotificationsInsertCall) Fields(s ...googleapi.Field) *NotificationsIns // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NotificationsInsertCall) Context(ctx context.Context) *NotificationsInsertCall { +func (c *DefaultObjectAccessControlsDeleteCall) Context(ctx context.Context) *DefaultObjectAccessControlsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NotificationsInsertCall) Header() http.Header { +func (c *DefaultObjectAccessControlsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NotificationsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *DefaultObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -6986,73 +7409,51 @@ func (c *NotificationsInsertCall) doRequest(alt string) (*http.Response, error) } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.notification) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, + "entity": c.entity, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.notifications.insert" call. -// Exactly one of *Notification or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Notification.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *NotificationsInsertCall) Do(opts ...googleapi.CallOption) (*Notification, error) { +// Do executes the "storage.defaultObjectAccessControls.delete" call. 
+func (c *DefaultObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } if err != nil { - return nil, err + return err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &Notification{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err + return gensupport.WrapError(err) } - return ret, nil + return nil // { - // "description": "Creates a notification subscription for a given bucket.", - // "httpMethod": "POST", - // "id": "storage.notifications.insert", + // "description": "Permanently deletes the default object ACL entry for the specified entity on the specified bucket.", + // "httpMethod": "DELETE", + // "id": "storage.defaultObjectAccessControls.delete", // "parameterOrder": [ - // "bucket" + // "bucket", + // "entity" // ], // "parameters": { // "bucket": { - // "description": "The parent bucket of the notification.", + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", // "location": "path", // "required": true, // "type": "string" @@ -7063,46 +7464,44 @@ func (c *NotificationsInsertCall) Do(opts ...googleapi.CallOption) (*Notificatio // "type": "string" // } // }, - // "path": "b/{bucket}/notificationConfigs", - // "request": { - // "$ref": "Notification" - // }, - // "response": { - // "$ref": "Notification" - // }, + // "path": "b/{bucket}/defaultObjectAcl/{entity}", // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" + // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } } -// method id "storage.notifications.list": +// method id "storage.defaultObjectAccessControls.get": -type NotificationsListCall struct { +type DefaultObjectAccessControlsGetCall struct { s *Service bucket string + entity string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves a list of notification subscriptions for a given -// bucket. +// Get: Returns the default object ACL entry for the specified entity on +// the specified bucket. // -// - bucket: Name of a Google Cloud Storage bucket. -func (r *NotificationsService) List(bucket string) *NotificationsListCall { - c := &NotificationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. 
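// Editor's illustration (not part of the vendored diff): a minimal sketch of
// the generated storage.defaultObjectAccessControls.delete call above, which
// removes the default object ACL entry for one entity via
// DELETE b/{bucket}/defaultObjectAcl/{entity}. The bucket and entity are
// placeholders; svc is assumed to come from storage.NewService.
func dropDefaultACLEntry(ctx context.Context, svc *storage.Service) error {
	return svc.DefaultObjectAccessControls.
		Delete("example-bucket", "allUsers").
		Context(ctx).
		Do()
}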
+func (r *DefaultObjectAccessControlsService) Get(bucket string, entity string) *DefaultObjectAccessControlsGetCall { + c := &DefaultObjectAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket + c.entity = entity return c } // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. -func (c *NotificationsListCall) UserProject(userProject string) *NotificationsListCall { +func (c *DefaultObjectAccessControlsGetCall) UserProject(userProject string) *DefaultObjectAccessControlsGetCall { c.urlParams_.Set("userProject", userProject) return c } @@ -7110,7 +7509,7 @@ func (c *NotificationsListCall) UserProject(userProject string) *NotificationsLi // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NotificationsListCall) Fields(s ...googleapi.Field) *NotificationsListCall { +func (c *DefaultObjectAccessControlsGetCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -7120,7 +7519,7 @@ func (c *NotificationsListCall) Fields(s ...googleapi.Field) *NotificationsListC // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *NotificationsListCall) IfNoneMatch(entityTag string) *NotificationsListCall { +func (c *DefaultObjectAccessControlsGetCall) IfNoneMatch(entityTag string) *DefaultObjectAccessControlsGetCall { c.ifNoneMatch_ = entityTag return c } @@ -7128,21 +7527,21 @@ func (c *NotificationsListCall) IfNoneMatch(entityTag string) *NotificationsList // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NotificationsListCall) Context(ctx context.Context) *NotificationsListCall { +func (c *DefaultObjectAccessControlsGetCall) Context(ctx context.Context) *DefaultObjectAccessControlsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NotificationsListCall) Header() http.Header { +func (c *DefaultObjectAccessControlsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NotificationsListCall) doRequest(alt string) (*http.Response, error) { +func (c *DefaultObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -7155,7 +7554,7 @@ func (c *NotificationsListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -7164,18 +7563,19 @@ func (c *NotificationsListCall) doRequest(alt string) (*http.Response, error) { req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, + "entity": c.entity, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.notifications.list" call. -// Exactly one of *Notifications or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Notifications.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use +// Do executes the "storage.defaultObjectAccessControls.get" call. +// Exactly one of *ObjectAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *NotificationsListCall) Do(opts ...googleapi.CallOption) (*Notifications, error) { +func (c *DefaultObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -7194,7 +7594,7 @@ func (c *NotificationsListCall) Do(opts ...googleapi.CallOption) (*Notifications if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &Notifications{ + ret := &ObjectAccessControl{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -7206,15 +7606,22 @@ func (c *NotificationsListCall) Do(opts ...googleapi.CallOption) (*Notifications } return ret, nil // { - // "description": "Retrieves a list of notification subscriptions for a given bucket.", + // "description": "Returns the default object ACL entry for the specified entity on the specified bucket.", // "httpMethod": "GET", - // "id": "storage.notifications.list", + // "id": "storage.defaultObjectAccessControls.get", // "parameterOrder": [ - // "bucket" + // "bucket", + // "entity" // ], // "parameters": { // "bucket": { - // "description": "Name of a Google Cloud Storage bucket.", + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", // "location": "path", // "required": true, // "type": "string" @@ -7225,62 +7632,43 @@ func (c *NotificationsListCall) Do(opts ...googleapi.CallOption) (*Notifications // "type": "string" // } // }, - // "path": "b/{bucket}/notificationConfigs", + // "path": "b/{bucket}/defaultObjectAcl/{entity}", // "response": { - // "$ref": "Notifications" + // "$ref": "ObjectAccessControl" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" + // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } } -// method id "storage.objectAccessControls.delete": +// method id "storage.defaultObjectAccessControls.insert": -type ObjectAccessControlsDeleteCall struct { - s *Service - bucket string - object string - entity string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type DefaultObjectAccessControlsInsertCall struct { + s *Service + bucket string + objectaccesscontrol *ObjectAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Permanently deletes the ACL entry for the specified entity on -// the specified object. +// Insert: Creates a new default object ACL entry on the specified +// bucket. // -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts -// (https://cloud.google.com/storage/docs/request-endpoints#encoding). -func (r *ObjectAccessControlsService) Delete(bucket string, object string, entity string) *ObjectAccessControlsDeleteCall { - c := &ObjectAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - bucket: Name of a bucket. +func (r *DefaultObjectAccessControlsService) Insert(bucket string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsInsertCall { + c := &DefaultObjectAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket - c.object = object - c.entity = entity - return c -} - -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). -func (c *ObjectAccessControlsDeleteCall) Generation(generation int64) *ObjectAccessControlsDeleteCall { - c.urlParams_.Set("generation", fmt.Sprint(generation)) + c.objectaccesscontrol = objectaccesscontrol return c } // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. -func (c *ObjectAccessControlsDeleteCall) UserProject(userProject string) *ObjectAccessControlsDeleteCall { +func (c *DefaultObjectAccessControlsInsertCall) UserProject(userProject string) *DefaultObjectAccessControlsInsertCall { c.urlParams_.Set("userProject", userProject) return c } @@ -7288,7 +7676,7 @@ func (c *ObjectAccessControlsDeleteCall) UserProject(userProject string) *Object // Fields allows partial responses to be retrieved. 
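// Editor's illustration (not part of the vendored diff): a hedged sketch of
// the generated storage.defaultObjectAccessControls.get call above. The bucket
// and entity names are placeholders; svc is assumed to come from
// storage.NewService ("google.golang.org/api/storage/v1").
func defaultRoleFor(ctx context.Context, svc *storage.Service) (string, error) {
	acl, err := svc.DefaultObjectAccessControls.
		Get("example-bucket", "allAuthenticatedUsers").
		Context(ctx).
		Do()
	if err != nil {
		return "", err
	}
	// Role is typically "READER" or "OWNER" for default object ACL entries.
	return acl.Role, nil
}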
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ObjectAccessControlsDeleteCall) Fields(s ...googleapi.Field) *ObjectAccessControlsDeleteCall { +func (c *DefaultObjectAccessControlsInsertCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -7296,21 +7684,21 @@ func (c *ObjectAccessControlsDeleteCall) Fields(s ...googleapi.Field) *ObjectAcc // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ObjectAccessControlsDeleteCall) Context(ctx context.Context) *ObjectAccessControlsDeleteCall { +func (c *DefaultObjectAccessControlsInsertCall) Context(ctx context.Context) *DefaultObjectAccessControlsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ObjectAccessControlsDeleteCall) Header() http.Header { +func (c *DefaultObjectAccessControlsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *DefaultObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -7318,43 +7706,69 @@ func (c *ObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, - "object": c.object, - "entity": c.entity, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.objectAccessControls.delete" call. -func (c *ObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error { +// Do executes the "storage.defaultObjectAccessControls.insert" call. +// Exactly one of *ObjectAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *DefaultObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } if err != nil { - return err + return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return gensupport.WrapError(err) + return nil, gensupport.WrapError(err) } - return nil + ret := &ObjectAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil // { - // "description": "Permanently deletes the ACL entry for the specified entity on the specified object.", - // "httpMethod": "DELETE", - // "id": "storage.objectAccessControls.delete", + // "description": "Creates a new default object ACL entry on the specified bucket.", + // "httpMethod": "POST", + // "id": "storage.defaultObjectAccessControls.insert", // "parameterOrder": [ - // "bucket", - // "object", - // "entity" + // "bucket" // ], // "parameters": { // "bucket": { @@ -7363,81 +7777,67 @@ func (c *ObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error // "required": true, // "type": "string" // }, - // "entity": { - // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", // "type": "string" // } // }, - // "path": "b/{bucket}/o/{object}/acl/{entity}", - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", + // "path": "b/{bucket}/defaultObjectAcl", + // "request": { + // "$ref": "ObjectAccessControl" + // }, + // "response": { + // "$ref": "ObjectAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } } -// method id "storage.objectAccessControls.get": +// method id "storage.defaultObjectAccessControls.list": -type ObjectAccessControlsGetCall struct { +type DefaultObjectAccessControlsListCall struct { s *Service bucket string - object string - entity string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Get: Returns the ACL entry for the specified entity on the specified -// object. +// List: Retrieves default object ACL entries on the specified bucket. // -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. -// - object: Name of the object. 
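// Editor's illustration (not part of the vendored diff): a minimal sketch of
// the generated storage.defaultObjectAccessControls.insert call above, which
// POSTs a new default object ACL entry to b/{bucket}/defaultObjectAcl. The
// bucket, entity, and role are placeholders; svc is assumed to come from
// storage.NewService ("google.golang.org/api/storage/v1").
func addDefaultReader(ctx context.Context, svc *storage.Service) (*storage.ObjectAccessControl, error) {
	return svc.DefaultObjectAccessControls.
		Insert("example-bucket", &storage.ObjectAccessControl{
			Entity: "allUsers",
			Role:   "READER",
		}).
		Context(ctx).
		Do()
}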
For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts -// (https://cloud.google.com/storage/docs/request-endpoints#encoding). -func (r *ObjectAccessControlsService) Get(bucket string, object string, entity string) *ObjectAccessControlsGetCall { - c := &ObjectAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - bucket: Name of a bucket. +func (r *DefaultObjectAccessControlsService) List(bucket string) *DefaultObjectAccessControlsListCall { + c := &DefaultObjectAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket - c.object = object - c.entity = entity return c } -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). -func (c *ObjectAccessControlsGetCall) Generation(generation int64) *ObjectAccessControlsGetCall { - c.urlParams_.Set("generation", fmt.Sprint(generation)) +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": If present, only return default ACL listing +// if the bucket's current metageneration matches this value. +func (c *DefaultObjectAccessControlsListCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *DefaultObjectAccessControlsListCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": If present, only return default ACL +// listing if the bucket's current metageneration does not match the +// given value. +func (c *DefaultObjectAccessControlsListCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *DefaultObjectAccessControlsListCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) return c } // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. -func (c *ObjectAccessControlsGetCall) UserProject(userProject string) *ObjectAccessControlsGetCall { +func (c *DefaultObjectAccessControlsListCall) UserProject(userProject string) *DefaultObjectAccessControlsListCall { c.urlParams_.Set("userProject", userProject) return c } @@ -7445,7 +7845,7 @@ func (c *ObjectAccessControlsGetCall) UserProject(userProject string) *ObjectAcc // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ObjectAccessControlsGetCall) Fields(s ...googleapi.Field) *ObjectAccessControlsGetCall { +func (c *DefaultObjectAccessControlsListCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -7455,7 +7855,7 @@ func (c *ObjectAccessControlsGetCall) Fields(s ...googleapi.Field) *ObjectAccess // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *ObjectAccessControlsGetCall) IfNoneMatch(entityTag string) *ObjectAccessControlsGetCall { +func (c *DefaultObjectAccessControlsListCall) IfNoneMatch(entityTag string) *DefaultObjectAccessControlsListCall { c.ifNoneMatch_ = entityTag return c } @@ -7463,21 +7863,21 @@ func (c *ObjectAccessControlsGetCall) IfNoneMatch(entityTag string) *ObjectAcces // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ObjectAccessControlsGetCall) Context(ctx context.Context) *ObjectAccessControlsGetCall { +func (c *DefaultObjectAccessControlsListCall) Context(ctx context.Context) *DefaultObjectAccessControlsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ObjectAccessControlsGetCall) Header() http.Header { +func (c *DefaultObjectAccessControlsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *DefaultObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -7490,7 +7890,7 @@ func (c *ObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, err var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -7499,20 +7899,18 @@ func (c *ObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, err req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, - "object": c.object, - "entity": c.entity, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.objectAccessControls.get" call. -// Exactly one of *ObjectAccessControl or error will be non-nil. Any +// Do executes the "storage.defaultObjectAccessControls.list" call. +// Exactly one of *ObjectAccessControls or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *ObjectAccessControl.ServerResponse.Header or (if a response was +// *ObjectAccessControls.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *ObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { +func (c *DefaultObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControls, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -7531,7 +7929,7 @@ func (c *ObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectA if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &ObjectAccessControl{ + ret := &ObjectAccessControls{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -7543,13 +7941,11 @@ func (c *ObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectA } return ret, nil // { - // "description": "Returns the ACL entry for the specified entity on the specified object.", + // "description": "Retrieves default object ACL entries on the specified bucket.", // "httpMethod": "GET", - // "id": "storage.objectAccessControls.get", + // "id": "storage.defaultObjectAccessControls.list", // "parameterOrder": [ - // "bucket", - // "object", - // "entity" + // "bucket" // ], // "parameters": { // "bucket": { @@ -7558,22 +7954,16 @@ func (c *ObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectA // "required": true, // "type": "string" // }, - // "entity": { - // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "ifMetagenerationMatch": { + // "description": "If present, only return default ACL listing if the bucket's current metageneration matches this value.", // "format": "int64", // "location": "query", // "type": "string" // }, - // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", - // "location": "path", - // "required": true, + // "ifMetagenerationNotMatch": { + // "description": "If present, only return default ACL listing if the bucket's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", // "type": "string" // }, // "userProject": { @@ -7582,9 +7972,9 @@ func (c *ObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectA // "type": "string" // } // }, - // "path": "b/{bucket}/o/{object}/acl/{entity}", + // "path": "b/{bucket}/defaultObjectAcl", // "response": { - // "$ref": "ObjectAccessControl" + // "$ref": "ObjectAccessControls" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -7594,43 +7984,35 @@ func (c *ObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectA } -// method id "storage.objectAccessControls.insert": +// method id "storage.defaultObjectAccessControls.patch": -type ObjectAccessControlsInsertCall struct { +type DefaultObjectAccessControlsPatchCall struct { s *Service bucket string - object string + entity string objectaccesscontrol *ObjectAccessControl urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Insert: Creates a new ACL entry on the specified object. +// Patch: Patches a default object ACL entry on the specified bucket. // // - bucket: Name of a bucket. -// - object: Name of the object. 
For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts -// (https://cloud.google.com/storage/docs/request-endpoints#encoding). -func (r *ObjectAccessControlsService) Insert(bucket string, object string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsInsertCall { - c := &ObjectAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. +func (r *DefaultObjectAccessControlsService) Patch(bucket string, entity string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsPatchCall { + c := &DefaultObjectAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket - c.object = object + c.entity = entity c.objectaccesscontrol = objectaccesscontrol return c } -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). -func (c *ObjectAccessControlsInsertCall) Generation(generation int64) *ObjectAccessControlsInsertCall { - c.urlParams_.Set("generation", fmt.Sprint(generation)) - return c -} - // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. -func (c *ObjectAccessControlsInsertCall) UserProject(userProject string) *ObjectAccessControlsInsertCall { +func (c *DefaultObjectAccessControlsPatchCall) UserProject(userProject string) *DefaultObjectAccessControlsPatchCall { c.urlParams_.Set("userProject", userProject) return c } @@ -7638,7 +8020,7 @@ func (c *ObjectAccessControlsInsertCall) UserProject(userProject string) *Object // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ObjectAccessControlsInsertCall) Fields(s ...googleapi.Field) *ObjectAccessControlsInsertCall { +func (c *DefaultObjectAccessControlsPatchCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -7646,21 +8028,21 @@ func (c *ObjectAccessControlsInsertCall) Fields(s ...googleapi.Field) *ObjectAcc // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ObjectAccessControlsInsertCall) Context(ctx context.Context) *ObjectAccessControlsInsertCall { +func (c *DefaultObjectAccessControlsPatchCall) Context(ctx context.Context) *DefaultObjectAccessControlsPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ObjectAccessControlsInsertCall) Header() http.Header { +func (c *DefaultObjectAccessControlsPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *DefaultObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -7675,28 +8057,28 @@ func (c *ObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, - "object": c.object, + "entity": c.entity, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.objectAccessControls.insert" call. +// Do executes the "storage.defaultObjectAccessControls.patch" call. // Exactly one of *ObjectAccessControl or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either // *ObjectAccessControl.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *ObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { +func (c *DefaultObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -7727,12 +8109,12 @@ func (c *ObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*Obje } return ret, nil // { - // "description": "Creates a new ACL entry on the specified object.", - // "httpMethod": "POST", - // "id": "storage.objectAccessControls.insert", + // "description": "Patches a default object ACL entry on the specified bucket.", + // "httpMethod": "PATCH", + // "id": "storage.defaultObjectAccessControls.patch", // "parameterOrder": [ // "bucket", - // "object" + // "entity" // ], // "parameters": { // "bucket": { @@ -7741,14 +8123,8 @@ func (c *ObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*Obje // "required": true, // "type": "string" // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + // "entity": { + // "description": "The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", // "location": "path", // "required": true, // "type": "string" @@ -7759,7 +8135,7 @@ func (c *ObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*Obje // "type": "string" // } // }, - // "path": "b/{bucket}/o/{object}/acl", + // "path": "b/{bucket}/defaultObjectAcl/{entity}", // "request": { // "$ref": "ObjectAccessControl" // }, @@ -7774,42 +8150,35 @@ func (c *ObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*Obje } -// method id "storage.objectAccessControls.list": +// method id "storage.defaultObjectAccessControls.update": -type ObjectAccessControlsListCall struct { - s *Service - bucket string - object string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type DefaultObjectAccessControlsUpdateCall struct { + s *Service + bucket string + entity string + objectaccesscontrol *ObjectAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves ACL entries on the specified object. +// Update: Updates a default object ACL entry on the specified bucket. // // - bucket: Name of a bucket. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts -// (https://cloud.google.com/storage/docs/request-endpoints#encoding). -func (r *ObjectAccessControlsService) List(bucket string, object string) *ObjectAccessControlsListCall { - c := &ObjectAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. +func (r *DefaultObjectAccessControlsService) Update(bucket string, entity string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsUpdateCall { + c := &DefaultObjectAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket - c.object = object - return c -} - -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). -func (c *ObjectAccessControlsListCall) Generation(generation int64) *ObjectAccessControlsListCall { - c.urlParams_.Set("generation", fmt.Sprint(generation)) + c.entity = entity + c.objectaccesscontrol = objectaccesscontrol return c } // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. -func (c *ObjectAccessControlsListCall) UserProject(userProject string) *ObjectAccessControlsListCall { +func (c *DefaultObjectAccessControlsUpdateCall) UserProject(userProject string) *DefaultObjectAccessControlsUpdateCall { c.urlParams_.Set("userProject", userProject) return c } @@ -7817,73 +8186,65 @@ func (c *ObjectAccessControlsListCall) UserProject(userProject string) *ObjectAc // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
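// A minimal usage sketch for the new defaultObjectAccessControls.update call,
// assuming this package is imported as storage, svc is a *storage.Service,
// ctx is an existing context.Context, and the bucket name, entity, and role
// below are placeholders (ObjectAccessControl's Entity/Role fields as usual):
//
//	acl := &storage.ObjectAccessControl{Entity: "allUsers", Role: "READER"}
//	updated, err := svc.DefaultObjectAccessControls.
//		Update("my-bucket", "allUsers", acl).
//		Context(ctx).
//		Do()
//	if err != nil {
//		// handle error
//	}
//	_ = updated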
-func (c *ObjectAccessControlsListCall) Fields(s ...googleapi.Field) *ObjectAccessControlsListCall { +func (c *DefaultObjectAccessControlsUpdateCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ObjectAccessControlsListCall) IfNoneMatch(entityTag string) *ObjectAccessControlsListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ObjectAccessControlsListCall) Context(ctx context.Context) *ObjectAccessControlsListCall { +func (c *DefaultObjectAccessControlsUpdateCall) Context(ctx context.Context) *DefaultObjectAccessControlsUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ObjectAccessControlsListCall) Header() http.Header { +func (c *DefaultObjectAccessControlsUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) { +func (c *DefaultObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("PUT", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, - "object": c.object, + "entity": c.entity, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.objectAccessControls.list" call. -// Exactly one of *ObjectAccessControls or error will be non-nil. Any +// Do executes the "storage.defaultObjectAccessControls.update" call. +// Exactly one of *ObjectAccessControl or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *ObjectAccessControls.ServerResponse.Header or (if a response was +// *ObjectAccessControl.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. 
-func (c *ObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControls, error) { +func (c *DefaultObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -7902,7 +8263,7 @@ func (c *ObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*Object if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &ObjectAccessControls{ + ret := &ObjectAccessControl{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -7914,12 +8275,12 @@ func (c *ObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*Object } return ret, nil // { - // "description": "Retrieves ACL entries on the specified object.", - // "httpMethod": "GET", - // "id": "storage.objectAccessControls.list", + // "description": "Updates a default object ACL entry on the specified bucket.", + // "httpMethod": "PUT", + // "id": "storage.defaultObjectAccessControls.update", // "parameterOrder": [ // "bucket", - // "object" + // "entity" // ], // "parameters": { // "bucket": { @@ -7928,14 +8289,8 @@ func (c *ObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*Object // "required": true, // "type": "string" // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", // "location": "path", // "required": true, // "type": "string" @@ -7946,9 +8301,12 @@ func (c *ObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*Object // "type": "string" // } // }, - // "path": "b/{bucket}/o/{object}/acl", + // "path": "b/{bucket}/defaultObjectAcl/{entity}", + // "request": { + // "$ref": "ObjectAccessControl" + // }, // "response": { - // "$ref": "ObjectAccessControls" + // "$ref": "ObjectAccessControl" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -7958,56 +8316,48 @@ func (c *ObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*Object } -// method id "storage.objectAccessControls.patch": +// method id "storage.managedFolders.delete": -type ObjectAccessControlsPatchCall struct { - s *Service - bucket string - object string - entity string - objectaccesscontrol *ObjectAccessControl - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ManagedFoldersDeleteCall struct { + s *Service + bucket string + managedFolder string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Patches an ACL entry on the specified object. +// Delete: Permanently deletes a managed folder. // -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. -// - object: Name of the object. 
For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts -// (https://cloud.google.com/storage/docs/request-endpoints#encoding). -func (r *ObjectAccessControlsService) Patch(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsPatchCall { - c := &ObjectAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - bucket: Name of the bucket containing the managed folder. +// - managedFolder: The managed folder name/path. +func (r *ManagedFoldersService) Delete(bucket string, managedFolder string) *ManagedFoldersDeleteCall { + c := &ManagedFoldersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket - c.object = object - c.entity = entity - c.objectaccesscontrol = objectaccesscontrol + c.managedFolder = managedFolder return c } -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). -func (c *ObjectAccessControlsPatchCall) Generation(generation int64) *ObjectAccessControlsPatchCall { - c.urlParams_.Set("generation", fmt.Sprint(generation)) +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": If set, only deletes the managed folder if +// its metageneration matches this value. +func (c *ManagedFoldersDeleteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ManagedFoldersDeleteCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *ObjectAccessControlsPatchCall) UserProject(userProject string) *ObjectAccessControlsPatchCall { - c.urlParams_.Set("userProject", userProject) +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": If set, only deletes the managed folder +// if its metageneration does not match this value. +func (c *ManagedFoldersDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ManagedFoldersDeleteCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ObjectAccessControlsPatchCall) Fields(s ...googleapi.Field) *ObjectAccessControlsPatchCall { +func (c *ManagedFoldersDeleteCall) Fields(s ...googleapi.Field) *ManagedFoldersDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -8015,21 +8365,21 @@ func (c *ObjectAccessControlsPatchCall) Fields(s ...googleapi.Field) *ObjectAcce // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ObjectAccessControlsPatchCall) Context(ctx context.Context) *ObjectAccessControlsPatchCall { +func (c *ManagedFoldersDeleteCall) Context(ctx context.Context) *ManagedFoldersDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ObjectAccessControlsPatchCall) Header() http.Header { +func (c *ManagedFoldersDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *ManagedFoldersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -8037,229 +8387,189 @@ func (c *ObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, e } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/managedFolders/{managedFolder}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "object": c.object, - "entity": c.entity, + "bucket": c.bucket, + "managedFolder": c.managedFolder, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.objectAccessControls.patch" call. -// Exactly one of *ObjectAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ObjectAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { +// Do executes the "storage.managedFolders.delete" call. +func (c *ManagedFoldersDeleteCall) Do(opts ...googleapi.CallOption) error { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } if err != nil { - return nil, err + return err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &ObjectAccessControl{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err + return gensupport.WrapError(err) } - return ret, nil + return nil // { - // "description": "Patches an ACL entry on the specified object.", - // "httpMethod": "PATCH", - // "id": "storage.objectAccessControls.patch", + // "description": "Permanently deletes a managed folder.", + // "httpMethod": "DELETE", + // "id": "storage.managedFolders.delete", // "parameterOrder": [ // "bucket", - // "object", - // "entity" + // "managedFolder" // ], // "parameters": { // "bucket": { - // "description": "Name of a bucket.", + // "description": "Name of the bucket containing the managed folder.", // "location": "path", // "required": true, // "type": "string" // }, - // "entity": { - // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", - // "required": true, + // "ifMetagenerationMatch": { + // "description": "If set, only deletes the managed folder if its metageneration matches this value.", + // "format": "int64", + // "location": "query", // "type": "string" // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "ifMetagenerationNotMatch": { + // "description": "If set, only deletes the managed folder if its metageneration does not match this value.", // "format": "int64", // "location": "query", // "type": "string" // }, - // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + // "managedFolder": { + // "description": "The managed folder name/path.", // "location": "path", // "required": true, // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" // } // }, - // "path": "b/{bucket}/o/{object}/acl/{entity}", - // "request": { - // "$ref": "ObjectAccessControl" - // }, - // "response": { - // "$ref": "ObjectAccessControl" - // }, + // "path": "b/{bucket}/managedFolders/{managedFolder}", // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } -// method id "storage.objectAccessControls.update": +// method id "storage.managedFolders.get": -type ObjectAccessControlsUpdateCall struct { - s *Service - bucket string - object string - entity string - objectaccesscontrol *ObjectAccessControl - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ManagedFoldersGetCall struct { + s *Service + bucket string + managedFolder string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Update: Updates an ACL entry on the specified object. +// Get: Returns metadata of the specified managed folder. // -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts -// (https://cloud.google.com/storage/docs/request-endpoints#encoding). -func (r *ObjectAccessControlsService) Update(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsUpdateCall { - c := &ObjectAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - bucket: Name of the bucket containing the managed folder. +// - managedFolder: The managed folder name/path. +func (r *ManagedFoldersService) Get(bucket string, managedFolder string) *ManagedFoldersGetCall { + c := &ManagedFoldersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket - c.object = object - c.entity = entity - c.objectaccesscontrol = objectaccesscontrol + c.managedFolder = managedFolder return c } -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). -func (c *ObjectAccessControlsUpdateCall) Generation(generation int64) *ObjectAccessControlsUpdateCall { - c.urlParams_.Set("generation", fmt.Sprint(generation)) +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the return of the managed folder +// metadata conditional on whether the managed folder's current +// metageneration matches the given value. +func (c *ManagedFoldersGetCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ManagedFoldersGetCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. 
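// A sketch of reading and then conditionally deleting a managed folder with
// the new managedFolders calls, assuming svc is a *storage.Service exposing
// the new ManagedFolders sub-service, ctx is an existing context.Context, the
// ManagedFolder resource carries a Metageneration field, and the bucket and
// folder names are placeholders:
//
//	mf, err := svc.ManagedFolders.Get("my-bucket", "team-a/").Context(ctx).Do()
//	if err != nil {
//		// handle error
//	}
//	// Delete only if the folder has not changed since it was read.
//	err = svc.ManagedFolders.
//		Delete("my-bucket", "team-a/").
//		IfMetagenerationMatch(mf.Metageneration).
//		Context(ctx).
//		Do()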
-func (c *ObjectAccessControlsUpdateCall) UserProject(userProject string) *ObjectAccessControlsUpdateCall { - c.urlParams_.Set("userProject", userProject) +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the return of the managed folder +// metadata conditional on whether the managed folder's current +// metageneration does not match the given value. +func (c *ManagedFoldersGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ManagedFoldersGetCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ObjectAccessControlsUpdateCall) Fields(s ...googleapi.Field) *ObjectAccessControlsUpdateCall { +func (c *ManagedFoldersGetCall) Fields(s ...googleapi.Field) *ManagedFoldersGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ManagedFoldersGetCall) IfNoneMatch(entityTag string) *ManagedFoldersGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ObjectAccessControlsUpdateCall) Context(ctx context.Context) *ObjectAccessControlsUpdateCall { +func (c *ManagedFoldersGetCall) Context(ctx context.Context) *ManagedFoldersGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ObjectAccessControlsUpdateCall) Header() http.Header { +func (c *ManagedFoldersGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *ManagedFoldersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/managedFolders/{managedFolder}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "object": c.object, - "entity": c.entity, + "bucket": c.bucket, + "managedFolder": c.managedFolder, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.objectAccessControls.update" call. -// Exactly one of *ObjectAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ObjectAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "storage.managedFolders.get" call. +// Exactly one of *ManagedFolder or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *ManagedFolder.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *ObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { +func (c *ManagedFoldersGetCall) Do(opts ...googleapi.CallOption) (*ManagedFolder, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -8278,7 +8588,7 @@ func (c *ObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*Obje if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &ObjectAccessControl{ + ret := &ManagedFolder{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -8290,152 +8600,90 @@ func (c *ObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*Obje } return ret, nil // { - // "description": "Updates an ACL entry on the specified object.", - // "httpMethod": "PUT", - // "id": "storage.objectAccessControls.update", + // "description": "Returns metadata of the specified managed folder.", + // "httpMethod": "GET", + // "id": "storage.managedFolders.get", // "parameterOrder": [ // "bucket", - // "object", - // "entity" + // "managedFolder" // ], // "parameters": { // "bucket": { - // "description": "Name of a bucket.", + // "description": "Name of the bucket containing the managed folder.", // "location": "path", // "required": true, // "type": "string" // }, - // "entity": { - // "description": "The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", - // "required": true, + // "ifMetagenerationMatch": { + // "description": "Makes the return of the managed folder metadata conditional on whether the managed folder's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", // "type": "string" // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "ifMetagenerationNotMatch": { + // "description": "Makes the return of the managed folder metadata conditional on whether the managed folder's current metageneration does not match the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, - // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + // "managedFolder": { + // "description": "The managed folder name/path.", // "location": "path", // "required": true, // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" // } // }, - // "path": "b/{bucket}/o/{object}/acl/{entity}", - // "request": { - // "$ref": "ObjectAccessControl" - // }, + // "path": "b/{bucket}/managedFolders/{managedFolder}", // "response": { - // "$ref": "ObjectAccessControl" + // "$ref": "ManagedFolder" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } -// method id "storage.objects.compose": +// method id "storage.managedFolders.getIamPolicy": -type ObjectsComposeCall struct { - s *Service - destinationBucket string - destinationObject string - composerequest *ComposeRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Compose: Concatenates a list of existing objects into a new object in -// the same bucket. -// -// - destinationBucket: Name of the bucket containing the source -// objects. The destination object is stored in this bucket. -// - destinationObject: Name of the new object. For information about -// how to URL encode object names to be path safe, see Encoding URI -// Path Parts -// (https://cloud.google.com/storage/docs/request-endpoints#encoding). -func (r *ObjectsService) Compose(destinationBucket string, destinationObject string, composerequest *ComposeRequest) *ObjectsComposeCall { - c := &ObjectsComposeCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.destinationBucket = destinationBucket - c.destinationObject = destinationObject - c.composerequest = composerequest - return c +type ManagedFoldersGetIamPolicyCall struct { + s *Service + bucket string + managedFolder string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// DestinationPredefinedAcl sets the optional parameter -// "destinationPredefinedAcl": Apply a predefined set of access controls -// to the destination object. 
-// -// Possible values: -// -// "authenticatedRead" - Object owner gets OWNER access, and -// -// allAuthenticatedUsers get READER access. +// GetIamPolicy: Returns an IAM policy for the specified managed folder. // -// "bucketOwnerFullControl" - Object owner gets OWNER access, and -// -// project team owners get OWNER access. -// -// "bucketOwnerRead" - Object owner gets OWNER access, and project -// -// team owners get READER access. -// -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team -// -// members get access according to their roles. -// -// "publicRead" - Object owner gets OWNER access, and allUsers get -// -// READER access. -func (c *ObjectsComposeCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsComposeCall { - c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl) - return c -} - -// IfGenerationMatch sets the optional parameter "ifGenerationMatch": -// Makes the operation conditional on whether the object's current -// generation matches the given value. Setting to 0 makes the operation -// succeed only if there are no live versions of the object. -func (c *ObjectsComposeCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsComposeCall { - c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) - return c -} - -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the operation conditional on whether -// the object's current metageneration matches the given value. -func (c *ObjectsComposeCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsComposeCall { - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) +// - bucket: Name of the bucket containing the managed folder. +// - managedFolder: The managed folder name/path. +func (r *ManagedFoldersService) GetIamPolicy(bucket string, managedFolder string) *ManagedFoldersGetIamPolicyCall { + c := &ManagedFoldersGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.managedFolder = managedFolder return c } -// KmsKeyName sets the optional parameter "kmsKeyName": Resource name of -// the Cloud KMS key, of the form -// projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, -// -// that will be used to encrypt the object. Overrides the object -// -// metadata's kms_key_name value, if any. -func (c *ObjectsComposeCall) KmsKeyName(kmsKeyName string) *ObjectsComposeCall { - c.urlParams_.Set("kmsKeyName", kmsKeyName) +// OptionsRequestedPolicyVersion sets the optional parameter +// "optionsRequestedPolicyVersion": The IAM policy format version to be +// returned. If the optionsRequestedPolicyVersion is for an older +// version that doesn't support part of the requested IAM policy, the +// request fails. +func (c *ManagedFoldersGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *ManagedFoldersGetIamPolicyCall { + c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) return c } // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. 
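// A sketch of fetching the IAM policy of a managed folder via the new
// managedFolders.getIamPolicy call, assuming svc is a *storage.Service, ctx
// is an existing context.Context, and the returned Policy carries the usual
// Bindings slice (bucket and folder names are placeholders):
//
//	policy, err := svc.ManagedFolders.
//		GetIamPolicy("my-bucket", "team-a/").
//		OptionsRequestedPolicyVersion(3).
//		Context(ctx).
//		Do()
//	if err != nil {
//		// handle error
//	}
//	for _, b := range policy.Bindings {
//		fmt.Println(b.Role, b.Members)
//	}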
-func (c *ObjectsComposeCall) UserProject(userProject string) *ObjectsComposeCall { +func (c *ManagedFoldersGetIamPolicyCall) UserProject(userProject string) *ManagedFoldersGetIamPolicyCall { c.urlParams_.Set("userProject", userProject) return c } @@ -8443,65 +8691,73 @@ func (c *ObjectsComposeCall) UserProject(userProject string) *ObjectsComposeCall // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ObjectsComposeCall) Fields(s ...googleapi.Field) *ObjectsComposeCall { +func (c *ManagedFoldersGetIamPolicyCall) Fields(s ...googleapi.Field) *ManagedFoldersGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ManagedFoldersGetIamPolicyCall) IfNoneMatch(entityTag string) *ManagedFoldersGetIamPolicyCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ObjectsComposeCall) Context(ctx context.Context) *ObjectsComposeCall { +func (c *ManagedFoldersGetIamPolicyCall) Context(ctx context.Context) *ManagedFoldersGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ObjectsComposeCall) Header() http.Header { +func (c *ManagedFoldersGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ObjectsComposeCall) doRequest(alt string) (*http.Response, error) { +func (c *ManagedFoldersGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.composerequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{destinationBucket}/o/{destinationObject}/compose") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/managedFolders/{managedFolder}/iam") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "destinationBucket": c.destinationBucket, - "destinationObject": c.destinationObject, + "bucket": c.bucket, + "managedFolder": c.managedFolder, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.objects.compose" call. -// Exactly one of *Object or error will be non-nil. Any non-2xx status +// Do executes the "storage.managedFolders.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. 
Any non-2xx status // code is an error. Response headers are in either -// *Object.ServerResponse.Header or (if a response was returned at all) +// *Policy.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. -func (c *ObjectsComposeCall) Do(opts ...googleapi.CallOption) (*Object, error) { +func (c *ManagedFoldersGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -8520,7 +8776,7 @@ func (c *ObjectsComposeCall) Do(opts ...googleapi.CallOption) (*Object, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &Object{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -8532,63 +8788,32 @@ func (c *ObjectsComposeCall) Do(opts ...googleapi.CallOption) (*Object, error) { } return ret, nil // { - // "description": "Concatenates a list of existing objects into a new object in the same bucket.", - // "httpMethod": "POST", - // "id": "storage.objects.compose", + // "description": "Returns an IAM policy for the specified managed folder.", + // "httpMethod": "GET", + // "id": "storage.managedFolders.getIamPolicy", // "parameterOrder": [ - // "destinationBucket", - // "destinationObject" + // "bucket", + // "managedFolder" // ], // "parameters": { - // "destinationBucket": { - // "description": "Name of the bucket containing the source objects. The destination object is stored in this bucket.", + // "bucket": { + // "description": "Name of the bucket containing the managed folder.", // "location": "path", // "required": true, // "type": "string" // }, - // "destinationObject": { - // "description": "Name of the new object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + // "managedFolder": { + // "description": "The managed folder name/path.", // "location": "path", // "required": true, // "type": "string" // }, - // "destinationPredefinedAcl": { - // "description": "Apply a predefined set of access controls to the destination object.", - // "enum": [ - // "authenticatedRead", - // "bucketOwnerFullControl", - // "bucketOwnerRead", - // "private", - // "projectPrivate", - // "publicRead" - // ], - // "enumDescriptions": [ - // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - // "Object owner gets OWNER access, and project team owners get OWNER access.", - // "Object owner gets OWNER access, and project team owners get READER access.", - // "Object owner gets OWNER access.", - // "Object owner gets OWNER access, and project team members get access according to their roles.", - // "Object owner gets OWNER access, and allUsers get READER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "ifGenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current generation matches the given value. 
Setting to 0 makes the operation succeed only if there are no live versions of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "kmsKeyName": { - // "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", + // "optionsRequestedPolicyVersion": { + // "description": "The IAM policy format version to be returned. If the optionsRequestedPolicyVersion is for an older version that doesn't support part of the requested IAM policy, the request fails.", + // "format": "int32", // "location": "query", - // "type": "string" + // "minimum": "1", + // "type": "integer" // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", @@ -8596,272 +8821,274 @@ func (c *ObjectsComposeCall) Do(opts ...googleapi.CallOption) (*Object, error) { // "type": "string" // } // }, - // "path": "b/{destinationBucket}/o/{destinationObject}/compose", - // "request": { - // "$ref": "ComposeRequest" - // }, + // "path": "b/{bucket}/managedFolders/{managedFolder}/iam", // "response": { - // "$ref": "Object" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } -// method id "storage.objects.copy": +// method id "storage.managedFolders.insert": -type ObjectsCopyCall struct { - s *Service - sourceBucket string - sourceObject string - destinationBucket string - destinationObject string - object *Object - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ManagedFoldersInsertCall struct { + s *Service + bucket string + managedfolder *ManagedFolder + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Copy: Copies a source object to a destination object. Optionally -// overrides metadata. +// Insert: Creates a new managed folder. // -// - destinationBucket: Name of the bucket in which to store the new -// object. Overrides the provided object metadata's bucket value, if -// any.For information about how to URL encode object names to be path -// safe, see Encoding URI Path Parts -// (https://cloud.google.com/storage/docs/request-endpoints#encoding). -// - destinationObject: Name of the new object. Required when the object -// metadata is not otherwise provided. Overrides the object metadata's -// name value, if any. -// - sourceBucket: Name of the bucket in which to find the source -// object. -// - sourceObject: Name of the source object. For information about how -// to URL encode object names to be path safe, see Encoding URI Path -// Parts -// (https://cloud.google.com/storage/docs/request-endpoints#encoding). 
-func (r *ObjectsService) Copy(sourceBucket string, sourceObject string, destinationBucket string, destinationObject string, object *Object) *ObjectsCopyCall { - c := &ObjectsCopyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.sourceBucket = sourceBucket - c.sourceObject = sourceObject - c.destinationBucket = destinationBucket - c.destinationObject = destinationObject - c.object = object +// - bucket: Name of the bucket containing the managed folder. +func (r *ManagedFoldersService) Insert(bucket string, managedfolder *ManagedFolder) *ManagedFoldersInsertCall { + c := &ManagedFoldersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.managedfolder = managedfolder return c } -// DestinationKmsKeyName sets the optional parameter -// "destinationKmsKeyName": Resource name of the Cloud KMS key, of the -// form -// projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, -// -// that will be used to encrypt the object. Overrides the object -// -// metadata's kms_key_name value, if any. -func (c *ObjectsCopyCall) DestinationKmsKeyName(destinationKmsKeyName string) *ObjectsCopyCall { - c.urlParams_.Set("destinationKmsKeyName", destinationKmsKeyName) +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ManagedFoldersInsertCall) Fields(s ...googleapi.Field) *ManagedFoldersInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// DestinationPredefinedAcl sets the optional parameter -// "destinationPredefinedAcl": Apply a predefined set of access controls -// to the destination object. -// -// Possible values: -// -// "authenticatedRead" - Object owner gets OWNER access, and -// -// allAuthenticatedUsers get READER access. -// -// "bucketOwnerFullControl" - Object owner gets OWNER access, and -// -// project team owners get OWNER access. -// -// "bucketOwnerRead" - Object owner gets OWNER access, and project -// -// team owners get READER access. -// -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team -// -// members get access according to their roles. -// -// "publicRead" - Object owner gets OWNER access, and allUsers get -// -// READER access. -func (c *ObjectsCopyCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsCopyCall { - c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl) - return c -} - -// IfGenerationMatch sets the optional parameter "ifGenerationMatch": -// Makes the operation conditional on whether the destination object's -// current generation matches the given value. Setting to 0 makes the -// operation succeed only if there are no live versions of the object. -func (c *ObjectsCopyCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsCopyCall { - c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ManagedFoldersInsertCall) Context(ctx context.Context) *ManagedFoldersInsertCall { + c.ctx_ = ctx return c } -// IfGenerationNotMatch sets the optional parameter -// "ifGenerationNotMatch": Makes the operation conditional on whether -// the destination object's current generation does not match the given -// value. If no live object exists, the precondition fails. 
Setting to 0 -// makes the operation succeed only if there is a live version of the -// object. -func (c *ObjectsCopyCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsCopyCall { - c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) - return c +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ManagedFoldersInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ } -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the operation conditional on whether -// the destination object's current metageneration matches the given -// value. -func (c *ObjectsCopyCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsCopyCall { - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) - return c +func (c *ManagedFoldersInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.managedfolder) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/managedFolders") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": Makes the operation conditional on -// whether the destination object's current metageneration does not -// match the given value. -func (c *ObjectsCopyCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsCopyCall { - c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) - return c -} +// Do executes the "storage.managedFolders.insert" call. +// Exactly one of *ManagedFolder or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *ManagedFolder.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ManagedFoldersInsertCall) Do(opts ...googleapi.CallOption) (*ManagedFolder, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &ManagedFolder{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a new managed folder.", + // "httpMethod": "POST", + // "id": "storage.managedFolders.insert", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket containing the managed folder.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{bucket}/managedFolders", + // "request": { + // "$ref": "ManagedFolder" + // }, + // "response": { + // "$ref": "ManagedFolder" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } -// IfSourceGenerationMatch sets the optional parameter -// "ifSourceGenerationMatch": Makes the operation conditional on whether -// the source object's current generation matches the given value. -func (c *ObjectsCopyCall) IfSourceGenerationMatch(ifSourceGenerationMatch int64) *ObjectsCopyCall { - c.urlParams_.Set("ifSourceGenerationMatch", fmt.Sprint(ifSourceGenerationMatch)) - return c } -// IfSourceGenerationNotMatch sets the optional parameter -// "ifSourceGenerationNotMatch": Makes the operation conditional on -// whether the source object's current generation does not match the -// given value. -func (c *ObjectsCopyCall) IfSourceGenerationNotMatch(ifSourceGenerationNotMatch int64) *ObjectsCopyCall { - c.urlParams_.Set("ifSourceGenerationNotMatch", fmt.Sprint(ifSourceGenerationNotMatch)) - return c -} +// method id "storage.managedFolders.list": -// IfSourceMetagenerationMatch sets the optional parameter -// "ifSourceMetagenerationMatch": Makes the operation conditional on -// whether the source object's current metageneration matches the given -// value. -func (c *ObjectsCopyCall) IfSourceMetagenerationMatch(ifSourceMetagenerationMatch int64) *ObjectsCopyCall { - c.urlParams_.Set("ifSourceMetagenerationMatch", fmt.Sprint(ifSourceMetagenerationMatch)) - return c +type ManagedFoldersListCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// IfSourceMetagenerationNotMatch sets the optional parameter -// "ifSourceMetagenerationNotMatch": Makes the operation conditional on -// whether the source object's current metageneration does not match the -// given value. -func (c *ObjectsCopyCall) IfSourceMetagenerationNotMatch(ifSourceMetagenerationNotMatch int64) *ObjectsCopyCall { - c.urlParams_.Set("ifSourceMetagenerationNotMatch", fmt.Sprint(ifSourceMetagenerationNotMatch)) +// List: Lists managed folders in the given bucket. +// +// - bucket: Name of the bucket containing the managed folder. 
+func (r *ManagedFoldersService) List(bucket string) *ManagedFoldersListCall { + c := &ManagedFoldersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket return c } -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to noAcl, unless the object resource -// specifies the acl property, when it defaults to full. -// -// Possible values: -// -// "full" - Include all properties. -// "noAcl" - Omit the owner, acl property. -func (c *ObjectsCopyCall) Projection(projection string) *ObjectsCopyCall { - c.urlParams_.Set("projection", projection) +// PageSize sets the optional parameter "pageSize": Maximum number of +// items return in a single page of responses. +func (c *ManagedFoldersListCall) PageSize(pageSize int64) *ManagedFoldersListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } -// SourceGeneration sets the optional parameter "sourceGeneration": If -// present, selects a specific revision of the source object (as opposed -// to the latest version, the default). -func (c *ObjectsCopyCall) SourceGeneration(sourceGeneration int64) *ObjectsCopyCall { - c.urlParams_.Set("sourceGeneration", fmt.Sprint(sourceGeneration)) +// PageToken sets the optional parameter "pageToken": A +// previously-returned page token representing part of the larger set of +// results to view. +func (c *ManagedFoldersListCall) PageToken(pageToken string) *ManagedFoldersListCall { + c.urlParams_.Set("pageToken", pageToken) return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *ObjectsCopyCall) UserProject(userProject string) *ObjectsCopyCall { - c.urlParams_.Set("userProject", userProject) +// Prefix sets the optional parameter "prefix": The managed folder +// name/path prefix to filter the output list of results. +func (c *ManagedFoldersListCall) Prefix(prefix string) *ManagedFoldersListCall { + c.urlParams_.Set("prefix", prefix) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ObjectsCopyCall) Fields(s ...googleapi.Field) *ObjectsCopyCall { +func (c *ManagedFoldersListCall) Fields(s ...googleapi.Field) *ManagedFoldersListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ManagedFoldersListCall) IfNoneMatch(entityTag string) *ManagedFoldersListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ObjectsCopyCall) Context(ctx context.Context) *ObjectsCopyCall { +func (c *ManagedFoldersListCall) Context(ctx context.Context) *ManagedFoldersListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ObjectsCopyCall) Header() http.Header { +func (c *ManagedFoldersListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ObjectsCopyCall) doRequest(alt string) (*http.Response, error) { +func (c *ManagedFoldersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/managedFolders") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "sourceBucket": c.sourceBucket, - "sourceObject": c.sourceObject, - "destinationBucket": c.destinationBucket, - "destinationObject": c.destinationObject, + "bucket": c.bucket, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.objects.copy" call. -// Exactly one of *Object or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Object.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ObjectsCopyCall) Do(opts ...googleapi.CallOption) (*Object, error) { +// Do executes the "storage.managedFolders.list" call. +// Exactly one of *ManagedFolders or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *ManagedFolders.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ManagedFoldersListCall) Do(opts ...googleapi.CallOption) (*ManagedFolders, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -8880,7 +9107,7 @@ func (c *ObjectsCopyCall) Do(opts ...googleapi.CallOption) (*Object, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &Object{ + ret := &ManagedFolders{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -8892,228 +9119,100 @@ func (c *ObjectsCopyCall) Do(opts ...googleapi.CallOption) (*Object, error) { } return ret, nil // { - // "description": "Copies a source object to a destination object. 
Optionally overrides metadata.", - // "httpMethod": "POST", - // "id": "storage.objects.copy", + // "description": "Lists managed folders in the given bucket.", + // "httpMethod": "GET", + // "id": "storage.managedFolders.list", // "parameterOrder": [ - // "sourceBucket", - // "sourceObject", - // "destinationBucket", - // "destinationObject" + // "bucket" // ], // "parameters": { - // "destinationBucket": { - // "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + // "bucket": { + // "description": "Name of the bucket containing the managed folder.", // "location": "path", // "required": true, // "type": "string" // }, - // "destinationKmsKeyName": { - // "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", + // "pageSize": { + // "description": "Maximum number of items return in a single page of responses.", + // "format": "int32", // "location": "query", - // "type": "string" + // "minimum": "0", + // "type": "integer" // }, - // "destinationObject": { - // "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any.", - // "location": "path", - // "required": true, + // "pageToken": { + // "description": "A previously-returned page token representing part of the larger set of results to view.", + // "location": "query", // "type": "string" // }, - // "destinationPredefinedAcl": { - // "description": "Apply a predefined set of access controls to the destination object.", - // "enum": [ - // "authenticatedRead", - // "bucketOwnerFullControl", - // "bucketOwnerRead", - // "private", - // "projectPrivate", - // "publicRead" - // ], - // "enumDescriptions": [ - // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - // "Object owner gets OWNER access, and project team owners get OWNER access.", - // "Object owner gets OWNER access, and project team owners get READER access.", - // "Object owner gets OWNER access.", - // "Object owner gets OWNER access, and project team members get access according to their roles.", - // "Object owner gets OWNER access, and allUsers get READER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "ifGenerationMatch": { - // "description": "Makes the operation conditional on whether the destination object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationNotMatch": { - // "description": "Makes the operation conditional on whether the destination object's current generation does not match the given value. If no live object exists, the precondition fails. 
Setting to 0 makes the operation succeed only if there is a live version of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifSourceGenerationMatch": { - // "description": "Makes the operation conditional on whether the source object's current generation matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifSourceGenerationNotMatch": { - // "description": "Makes the operation conditional on whether the source object's current generation does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifSourceMetagenerationMatch": { - // "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifSourceMetagenerationNotMatch": { - // "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit the owner, acl property." - // ], - // "location": "query", - // "type": "string" - // }, - // "sourceBucket": { - // "description": "Name of the bucket in which to find the source object.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "sourceGeneration": { - // "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "sourceObject": { - // "description": "Name of the source object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + // "prefix": { + // "description": "The managed folder name/path prefix to filter the output list of results.", // "location": "query", // "type": "string" // } // }, - // "path": "b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}", - // "request": { - // "$ref": "Object" - // }, + // "path": "b/{bucket}/managedFolders", // "response": { - // "$ref": "Object" + // "$ref": "ManagedFolders" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } -// method id "storage.objects.delete": - -type ObjectsDeleteCall struct { - s *Service - bucket string - object string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes an object and its metadata. Deletions are permanent -// if versioning is not enabled for the bucket, or if the generation -// parameter is used. -// -// - bucket: Name of the bucket in which the object resides. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts -// (https://cloud.google.com/storage/docs/request-endpoints#encoding). -func (r *ObjectsService) Delete(bucket string, object string) *ObjectsDeleteCall { - c := &ObjectsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.object = object - return c -} - -// Generation sets the optional parameter "generation": If present, -// permanently deletes a specific revision of this object (as opposed to -// the latest version, the default). -func (c *ObjectsDeleteCall) Generation(generation int64) *ObjectsDeleteCall { - c.urlParams_.Set("generation", fmt.Sprint(generation)) - return c -} - -// IfGenerationMatch sets the optional parameter "ifGenerationMatch": -// Makes the operation conditional on whether the object's current -// generation matches the given value. Setting to 0 makes the operation -// succeed only if there are no live versions of the object. -func (c *ObjectsDeleteCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsDeleteCall { - c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) - return c +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ManagedFoldersListCall) Pages(ctx context.Context, f func(*ManagedFolders) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } } -// IfGenerationNotMatch sets the optional parameter -// "ifGenerationNotMatch": Makes the operation conditional on whether -// the object's current generation does not match the given value. If no -// live object exists, the precondition fails. Setting to 0 makes the -// operation succeed only if there is a live version of the object. 
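The generated ManagedFoldersListCall above follows the same shape as the package's other list calls: Do fetches a single page and the Pages helper drives the NextPageToken loop. Below is a minimal caller-side sketch, not taken from this patch, assuming the Service wires up a ManagedFolders sub-service field as it does for the other resources and that the ManagedFolders response exposes Items and a Name field like the other list responses; svc, the bucket name and the page size are placeholders.

package main

import (
	"context"
	"fmt"
	"log"

	storage "google.golang.org/api/storage/v1"
)

// listManagedFolders walks every managed folder in a bucket by letting the
// generated Pages helper follow NextPageToken until the last page.
func listManagedFolders(ctx context.Context, svc *storage.Service, bucket string) error {
	return svc.ManagedFolders.List(bucket).
		PageSize(100).
		Pages(ctx, func(page *storage.ManagedFolders) error {
			for _, mf := range page.Items { // Items/Name assumed to mirror the other list responses
				fmt.Println(mf.Name)
			}
			return nil
		})
}

func main() {
	ctx := context.Background()
	svc, err := storage.NewService(ctx) // uses Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}
	if err := listManagedFolders(ctx, svc, "my-bucket"); err != nil {
		log.Fatal(err)
	}
}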
-func (c *ObjectsDeleteCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsDeleteCall { - c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) - return c -} +// method id "storage.managedFolders.setIamPolicy": -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the operation conditional on whether -// the object's current metageneration matches the given value. -func (c *ObjectsDeleteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsDeleteCall { - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) - return c +type ManagedFoldersSetIamPolicyCall struct { + s *Service + bucket string + managedFolder string + policy *Policy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": Makes the operation conditional on -// whether the object's current metageneration does not match the given -// value. -func (c *ObjectsDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsDeleteCall { - c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) +// SetIamPolicy: Updates an IAM policy for the specified managed folder. +// +// - bucket: Name of the bucket containing the managed folder. +// - managedFolder: The managed folder name/path. +func (r *ManagedFoldersService) SetIamPolicy(bucket string, managedFolder string, policy *Policy) *ManagedFoldersSetIamPolicyCall { + c := &ManagedFoldersSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.managedFolder = managedFolder + c.policy = policy return c } // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. -func (c *ObjectsDeleteCall) UserProject(userProject string) *ObjectsDeleteCall { +func (c *ManagedFoldersSetIamPolicyCall) UserProject(userProject string) *ManagedFoldersSetIamPolicyCall { c.urlParams_.Set("userProject", userProject) return c } @@ -9121,7 +9220,7 @@ func (c *ObjectsDeleteCall) UserProject(userProject string) *ObjectsDeleteCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ObjectsDeleteCall) Fields(s ...googleapi.Field) *ObjectsDeleteCall { +func (c *ManagedFoldersSetIamPolicyCall) Fields(s ...googleapi.Field) *ManagedFoldersSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -9129,21 +9228,21 @@ func (c *ObjectsDeleteCall) Fields(s ...googleapi.Field) *ObjectsDeleteCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ObjectsDeleteCall) Context(ctx context.Context) *ObjectsDeleteCall { +func (c *ManagedFoldersSetIamPolicyCall) Context(ctx context.Context) *ManagedFoldersSetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ObjectsDeleteCall) Header() http.Header { +func (c *ManagedFoldersSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ObjectsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *ManagedFoldersSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -9151,185 +9250,135 @@ func (c *ObjectsDeleteCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/managedFolders/{managedFolder}/iam") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("PUT", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "object": c.object, + "bucket": c.bucket, + "managedFolder": c.managedFolder, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.objects.delete" call. -func (c *ObjectsDeleteCall) Do(opts ...googleapi.CallOption) error { +// Do executes the "storage.managedFolders.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ManagedFoldersSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } if err != nil { - return err + return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return gensupport.WrapError(err) + return nil, gensupport.WrapError(err) } - return nil + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil // { - // "description": "Deletes an object and its metadata. 
Deletions are permanent if versioning is not enabled for the bucket, or if the generation parameter is used.", - // "httpMethod": "DELETE", - // "id": "storage.objects.delete", + // "description": "Updates an IAM policy for the specified managed folder.", + // "httpMethod": "PUT", + // "id": "storage.managedFolders.setIamPolicy", // "parameterOrder": [ // "bucket", - // "object" + // "managedFolder" // ], // "parameters": { // "bucket": { - // "description": "Name of the bucket in which the object resides.", + // "description": "Name of the bucket containing the managed folder.", // "location": "path", // "required": true, // "type": "string" // }, - // "generation": { - // "description": "If present, permanently deletes a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", - // "format": "int64", - // "location": "query", + // "managedFolder": { + // "description": "The managed folder name/path.", + // "location": "path", + // "required": true, // "type": "string" // }, - // "ifGenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "userProject": { + // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", // "location": "query", // "type": "string" // } // }, - // "path": "b/{bucket}/o/{object}", + // "path": "b/{bucket}/managedFolders/{managedFolder}/iam", + // "request": { + // "$ref": "Policy" + // }, + // "response": { + // "$ref": "Policy" + // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" + // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } } -// method id "storage.objects.get": +// method id "storage.managedFolders.testIamPermissions": -type ObjectsGetCall struct { - s *Service - bucket string - object string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type ManagedFoldersTestIamPermissionsCall struct { + s *Service + bucket string + managedFolder string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Retrieves an object or its metadata. +// TestIamPermissions: Tests a set of permissions on the given managed +// folder to see which, if any, are held by the caller. // -// - bucket: Name of the bucket in which the object resides. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts -// (https://cloud.google.com/storage/docs/request-endpoints#encoding). -func (r *ObjectsService) Get(bucket string, object string) *ObjectsGetCall { - c := &ObjectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - bucket: Name of the bucket containing the managed folder. +// - managedFolder: The managed folder name/path. +// - permissions: Permissions to test. +func (r *ManagedFoldersService) TestIamPermissions(bucket string, managedFolder string, permissions []string) *ManagedFoldersTestIamPermissionsCall { + c := &ManagedFoldersTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket - c.object = object - return c -} - -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). -func (c *ObjectsGetCall) Generation(generation int64) *ObjectsGetCall { - c.urlParams_.Set("generation", fmt.Sprint(generation)) - return c -} - -// IfGenerationMatch sets the optional parameter "ifGenerationMatch": -// Makes the operation conditional on whether the object's current -// generation matches the given value. Setting to 0 makes the operation -// succeed only if there are no live versions of the object. -func (c *ObjectsGetCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsGetCall { - c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) - return c -} - -// IfGenerationNotMatch sets the optional parameter -// "ifGenerationNotMatch": Makes the operation conditional on whether -// the object's current generation does not match the given value. If no -// live object exists, the precondition fails. Setting to 0 makes the -// operation succeed only if there is a live version of the object. 
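The setIamPolicy call generated above is normally used in a read-modify-write flow. The sketch below assumes a companion GetIamPolicy call on ManagedFoldersService (its discovery metadata appears earlier in this hunk), and the Policy/PolicyBindings field names and the role string are assumptions about the generated message types rather than anything this patch defines.

package example

import (
	"context"

	storage "google.golang.org/api/storage/v1"
)

// grantManagedFolderReader fetches the current IAM policy for a managed
// folder, appends a binding, and writes the policy back with SetIamPolicy.
func grantManagedFolderReader(ctx context.Context, svc *storage.Service, bucket, folder, member string) error {
	policy, err := svc.ManagedFolders.GetIamPolicy(bucket, folder).Context(ctx).Do()
	if err != nil {
		return err
	}
	policy.Bindings = append(policy.Bindings, &storage.PolicyBindings{
		Role:    "roles/storage.objectViewer", // assumed role name for illustration
		Members: []string{member},
	})
	_, err = svc.ManagedFolders.SetIamPolicy(bucket, folder, policy).Context(ctx).Do()
	return err
}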
-func (c *ObjectsGetCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsGetCall { - c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) - return c -} - -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the operation conditional on whether -// the object's current metageneration matches the given value. -func (c *ObjectsGetCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsGetCall { - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) - return c -} - -// IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": Makes the operation conditional on -// whether the object's current metageneration does not match the given -// value. -func (c *ObjectsGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsGetCall { - c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) - return c -} - -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to noAcl. -// -// Possible values: -// -// "full" - Include all properties. -// "noAcl" - Omit the owner, acl property. -func (c *ObjectsGetCall) Projection(projection string) *ObjectsGetCall { - c.urlParams_.Set("projection", projection) + c.managedFolder = managedFolder + c.urlParams_.SetMulti("permissions", append([]string{}, permissions...)) return c } // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. -func (c *ObjectsGetCall) UserProject(userProject string) *ObjectsGetCall { +func (c *ManagedFoldersTestIamPermissionsCall) UserProject(userProject string) *ManagedFoldersTestIamPermissionsCall { c.urlParams_.Set("userProject", userProject) return c } @@ -9337,7 +9386,7 @@ func (c *ObjectsGetCall) UserProject(userProject string) *ObjectsGetCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ObjectsGetCall) Fields(s ...googleapi.Field) *ObjectsGetCall { +func (c *ManagedFoldersTestIamPermissionsCall) Fields(s ...googleapi.Field) *ManagedFoldersTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -9347,29 +9396,29 @@ func (c *ObjectsGetCall) Fields(s ...googleapi.Field) *ObjectsGetCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *ObjectsGetCall) IfNoneMatch(entityTag string) *ObjectsGetCall { +func (c *ManagedFoldersTestIamPermissionsCall) IfNoneMatch(entityTag string) *ManagedFoldersTestIamPermissionsCall { c.ifNoneMatch_ = entityTag return c } -// Context sets the context to be used in this call's Do and Download -// methods. Any pending HTTP request will be aborted if the provided -// context is canceled. -func (c *ObjectsGetCall) Context(ctx context.Context) *ObjectsGetCall { +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ManagedFoldersTestIamPermissionsCall) Context(ctx context.Context) *ManagedFoldersTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ObjectsGetCall) Header() http.Header { +func (c *ManagedFoldersTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ObjectsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *ManagedFoldersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -9382,7 +9431,7 @@ func (c *ObjectsGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/managedFolders/{managedFolder}/iam/testPermissions") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -9390,36 +9439,20 @@ func (c *ObjectsGetCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "object": c.object, + "bucket": c.bucket, + "managedFolder": c.managedFolder, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Download fetches the API endpoint's "media" value, instead of the normal -// API response value. If the returned error is nil, the Response is guaranteed to -// have a 2xx status code. Callers must close the Response.Body as usual. -func (c *ObjectsGetCall) Download(opts ...googleapi.CallOption) (*http.Response, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("media") - if err != nil { - return nil, err - } - if err := googleapi.CheckMediaResponse(res); err != nil { - res.Body.Close() - return nil, gensupport.WrapError(err) - } - return res, nil -} - -// Do executes the "storage.objects.get" call. -// Exactly one of *Object or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Object.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ObjectsGetCall) Do(opts ...googleapi.CallOption) (*Object, error) { +// Do executes the "storage.managedFolders.testIamPermissions" call. +// Exactly one of *TestIamPermissionsResponse or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *TestIamPermissionsResponse.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ManagedFoldersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -9438,7 +9471,7 @@ func (c *ObjectsGetCall) Do(opts ...googleapi.CallOption) (*Object, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &Object{ + ret := &TestIamPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -9450,67 +9483,32 @@ func (c *ObjectsGetCall) Do(opts ...googleapi.CallOption) (*Object, error) { } return ret, nil // { - // "description": "Retrieves an object or its metadata.", + // "description": "Tests a set of permissions on the given managed folder to see which, if any, are held by the caller.", // "httpMethod": "GET", - // "id": "storage.objects.get", + // "id": "storage.managedFolders.testIamPermissions", // "parameterOrder": [ // "bucket", - // "object" + // "managedFolder", + // "permissions" // ], // "parameters": { // "bucket": { - // "description": "Name of the bucket in which the object resides.", + // "description": "Name of the bucket containing the managed folder.", // "location": "path", // "required": true, // "type": "string" // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + // "managedFolder": { + // "description": "The managed folder name/path.", // "location": "path", // "required": true, // "type": "string" // }, - // "projection": { - // "description": "Set of properties to return. Defaults to noAcl.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit the owner, acl property." 
- // ], + // "permissions": { + // "description": "Permissions to test.", // "location": "query", + // "repeated": true, + // "required": true, // "type": "string" // }, // "userProject": { @@ -9519,9 +9517,9 @@ func (c *ObjectsGetCall) Do(opts ...googleapi.CallOption) (*Object, error) { // "type": "string" // } // }, - // "path": "b/{bucket}/o/{object}", + // "path": "b/{bucket}/managedFolders/{managedFolder}/iam/testPermissions", // "response": { - // "$ref": "Object" + // "$ref": "TestIamPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -9529,49 +9527,36 @@ func (c *ObjectsGetCall) Do(opts ...googleapi.CallOption) (*Object, error) { // "https://www.googleapis.com/auth/devstorage.full_control", // "https://www.googleapis.com/auth/devstorage.read_only", // "https://www.googleapis.com/auth/devstorage.read_write" - // ], - // "supportsMediaDownload": true, - // "useMediaDownloadService": true + // ] // } } -// method id "storage.objects.getIamPolicy": +// method id "storage.notifications.delete": -type ObjectsGetIamPolicyCall struct { +type NotificationsDeleteCall struct { s *Service bucket string - object string + notification string urlParams_ gensupport.URLParams - ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// GetIamPolicy: Returns an IAM policy for the specified object. +// Delete: Permanently deletes a notification subscription. // -// - bucket: Name of the bucket in which the object resides. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts -// (https://cloud.google.com/storage/docs/request-endpoints#encoding). -func (r *ObjectsService) GetIamPolicy(bucket string, object string) *ObjectsGetIamPolicyCall { - c := &ObjectsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - bucket: The parent bucket of the notification. +// - notification: ID of the notification to delete. +func (r *NotificationsService) Delete(bucket string, notification string) *NotificationsDeleteCall { + c := &NotificationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket - c.object = object - return c -} - -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). -func (c *ObjectsGetIamPolicyCall) Generation(generation int64) *ObjectsGetIamPolicyCall { - c.urlParams_.Set("generation", fmt.Sprint(generation)) + c.notification = notification return c } // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. -func (c *ObjectsGetIamPolicyCall) UserProject(userProject string) *ObjectsGetIamPolicyCall { +func (c *NotificationsDeleteCall) UserProject(userProject string) *NotificationsDeleteCall { c.urlParams_.Set("userProject", userProject) return c } @@ -9579,39 +9564,167 @@ func (c *ObjectsGetIamPolicyCall) UserProject(userProject string) *ObjectsGetIam // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
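A caller-side sketch of the testIamPermissions call generated above. The permission strings and the Permissions field on TestIamPermissionsResponse are assumptions based on the usual shape of this API; the bucket and folder names are placeholders.

package example

import (
	"context"
	"fmt"

	storage "google.golang.org/api/storage/v1"
)

// checkManagedFolderAccess reports which of the requested permissions the
// caller holds on a managed folder.
func checkManagedFolderAccess(ctx context.Context, svc *storage.Service, bucket, folder string) error {
	resp, err := svc.ManagedFolders.
		TestIamPermissions(bucket, folder, []string{
			"storage.managedFolders.get", // assumed permission names
			"storage.managedFolders.delete",
		}).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	for _, p := range resp.Permissions { // Permissions field assumed on the response type
		fmt.Println("caller holds:", p)
	}
	return nil
}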
-func (c *ObjectsGetIamPolicyCall) Fields(s ...googleapi.Field) *ObjectsGetIamPolicyCall { +func (c *NotificationsDeleteCall) Fields(s ...googleapi.Field) *NotificationsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ObjectsGetIamPolicyCall) IfNoneMatch(entityTag string) *ObjectsGetIamPolicyCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ObjectsGetIamPolicyCall) Context(ctx context.Context) *ObjectsGetIamPolicyCall { +func (c *NotificationsDeleteCall) Context(ctx context.Context) *NotificationsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ObjectsGetIamPolicyCall) Header() http.Header { +func (c *NotificationsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ObjectsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *NotificationsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs/{notification}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "notification": c.notification, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.notifications.delete" call. +func (c *NotificationsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return gensupport.WrapError(err) + } + return nil + // { + // "description": "Permanently deletes a notification subscription.", + // "httpMethod": "DELETE", + // "id": "storage.notifications.delete", + // "parameterOrder": [ + // "bucket", + // "notification" + // ], + // "parameters": { + // "bucket": { + // "description": "The parent bucket of the notification.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "notification": { + // "description": "ID of the notification to delete.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/notificationConfigs/{notification}", + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.notifications.get": + +type NotificationsGetCall struct { + s *Service + bucket string + notification string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: View a notification configuration. +// +// - bucket: The parent bucket of the notification. +// - notification: Notification ID. +func (r *NotificationsService) Get(bucket string, notification string) *NotificationsGetCall { + c := &NotificationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.notification = notification + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *NotificationsGetCall) UserProject(userProject string) *NotificationsGetCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NotificationsGetCall) Fields(s ...googleapi.Field) *NotificationsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *NotificationsGetCall) IfNoneMatch(entityTag string) *NotificationsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NotificationsGetCall) Context(ctx context.Context) *NotificationsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *NotificationsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NotificationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -9624,7 +9737,7 @@ func (c *ObjectsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/iam") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs/{notification}") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -9632,20 +9745,20 @@ func (c *ObjectsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "object": c.object, + "bucket": c.bucket, + "notification": c.notification, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.objects.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ObjectsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "storage.notifications.get" call. +// Exactly one of *Notification or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Notification.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NotificationsGetCall) Do(opts ...googleapi.CallOption) (*Notification, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -9664,7 +9777,7 @@ func (c *ObjectsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &Policy{ + ret := &Notification{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -9676,28 +9789,22 @@ func (c *ObjectsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err } return ret, nil // { - // "description": "Returns an IAM policy for the specified object.", + // "description": "View a notification configuration.", // "httpMethod": "GET", - // "id": "storage.objects.getIamPolicy", + // "id": "storage.notifications.get", // "parameterOrder": [ // "bucket", - // "object" + // "notification" // ], // "parameters": { // "bucket": { - // "description": "Name of the bucket in which the object resides.", + // "description": "The parent bucket of the notification.", // "location": "path", // "required": true, // "type": "string" // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + // "notification": { + // "description": "Notification ID", // "location": "path", // "required": true, // "type": "string" @@ -9708,9 +9815,9 @@ func (c *ObjectsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err // "type": "string" // } // }, - // "path": "b/{bucket}/o/{object}/iam", + // "path": "b/{bucket}/notificationConfigs/{notification}", // "response": { - // "$ref": "Policy" + // "$ref": "Notification" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -9723,243 +9830,60 @@ func (c *ObjectsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err } -// method id "storage.objects.insert": +// method id "storage.notifications.insert": -type ObjectsInsertCall struct { - s *Service - bucket string - object *Object - urlParams_ gensupport.URLParams - mediaInfo_ *gensupport.MediaInfo - retry *gensupport.RetryConfig - ctx_ context.Context - header_ http.Header +type NotificationsInsertCall struct { + s *Service + bucket string + notification *Notification + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Stores a new object and metadata. +// Insert: Creates a notification subscription for a given bucket. // -// - bucket: Name of the bucket in which to store the new object. -// Overrides the provided object metadata's bucket value, if any. -func (r *ObjectsService) Insert(bucket string, object *Object) *ObjectsInsertCall { - c := &ObjectsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - bucket: The parent bucket of the notification. +func (r *NotificationsService) Insert(bucket string, notification *Notification) *NotificationsInsertCall { + c := &NotificationsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket - c.object = object + c.notification = notification return c } -// ContentEncoding sets the optional parameter "contentEncoding": If -// set, sets the contentEncoding property of the final object to this -// value. Setting this parameter is equivalent to setting the -// contentEncoding metadata property. This can be useful when uploading -// an object with uploadType=media to indicate the encoding of the -// content being uploaded. -func (c *ObjectsInsertCall) ContentEncoding(contentEncoding string) *ObjectsInsertCall { - c.urlParams_.Set("contentEncoding", contentEncoding) +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *NotificationsInsertCall) UserProject(userProject string) *NotificationsInsertCall { + c.urlParams_.Set("userProject", userProject) return c } -// IfGenerationMatch sets the optional parameter "ifGenerationMatch": -// Makes the operation conditional on whether the object's current -// generation matches the given value. Setting to 0 makes the operation -// succeed only if there are no live versions of the object. -func (c *ObjectsInsertCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsInsertCall { - c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
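The notifications Get call generated above supports the etag-based refresh pattern described in its comments (IfNoneMatch plus googleapi.IsNotModified). A short sketch, assuming the Service exposes a Notifications sub-service field and that the Notification message carries Id and Topic fields; the bucket, notification ID and etag are placeholders.

package example

import (
	"context"
	"fmt"

	"google.golang.org/api/googleapi"
	storage "google.golang.org/api/storage/v1"
)

// fetchNotification reads one notification configuration, skipping the body
// when the server reports it is unchanged since the supplied etag.
func fetchNotification(ctx context.Context, svc *storage.Service, bucket, id, etag string) error {
	n, err := svc.Notifications.Get(bucket, id).IfNoneMatch(etag).Context(ctx).Do()
	if googleapi.IsNotModified(err) {
		fmt.Println("notification unchanged")
		return nil
	}
	if err != nil {
		return err
	}
	fmt.Println(n.Id, n.Topic) // field names assumed for the Notification message
	return nil
}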
+func (c *NotificationsInsertCall) Fields(s ...googleapi.Field) *NotificationsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfGenerationNotMatch sets the optional parameter -// "ifGenerationNotMatch": Makes the operation conditional on whether -// the object's current generation does not match the given value. If no -// live object exists, the precondition fails. Setting to 0 makes the -// operation succeed only if there is a live version of the object. -func (c *ObjectsInsertCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsInsertCall { - c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) - return c -} - -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the operation conditional on whether -// the object's current metageneration matches the given value. -func (c *ObjectsInsertCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsInsertCall { - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) - return c -} - -// IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": Makes the operation conditional on -// whether the object's current metageneration does not match the given -// value. -func (c *ObjectsInsertCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsInsertCall { - c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) - return c -} - -// KmsKeyName sets the optional parameter "kmsKeyName": Resource name of -// the Cloud KMS key, of the form -// projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, -// -// that will be used to encrypt the object. Overrides the object -// -// metadata's kms_key_name value, if any. -func (c *ObjectsInsertCall) KmsKeyName(kmsKeyName string) *ObjectsInsertCall { - c.urlParams_.Set("kmsKeyName", kmsKeyName) - return c -} - -// Name sets the optional parameter "name": Name of the object. Required -// when the object metadata is not otherwise provided. Overrides the -// object metadata's name value, if any. For information about how to -// URL encode object names to be path safe, see Encoding URI Path Parts -// (https://cloud.google.com/storage/docs/request-endpoints#encoding). -func (c *ObjectsInsertCall) Name(name string) *ObjectsInsertCall { - c.urlParams_.Set("name", name) - return c -} - -// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a -// predefined set of access controls to this object. -// -// Possible values: -// -// "authenticatedRead" - Object owner gets OWNER access, and -// -// allAuthenticatedUsers get READER access. -// -// "bucketOwnerFullControl" - Object owner gets OWNER access, and -// -// project team owners get OWNER access. -// -// "bucketOwnerRead" - Object owner gets OWNER access, and project -// -// team owners get READER access. -// -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team -// -// members get access according to their roles. -// -// "publicRead" - Object owner gets OWNER access, and allUsers get -// -// READER access. -func (c *ObjectsInsertCall) PredefinedAcl(predefinedAcl string) *ObjectsInsertCall { - c.urlParams_.Set("predefinedAcl", predefinedAcl) - return c -} - -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to noAcl, unless the object resource -// specifies the acl property, when it defaults to full. 
-// -// Possible values: -// -// "full" - Include all properties. -// "noAcl" - Omit the owner, acl property. -func (c *ObjectsInsertCall) Projection(projection string) *ObjectsInsertCall { - c.urlParams_.Set("projection", projection) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *ObjectsInsertCall) UserProject(userProject string) *ObjectsInsertCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Media specifies the media to upload in one or more chunks. The chunk -// size may be controlled by supplying a MediaOption generated by -// googleapi.ChunkSize. The chunk size defaults to -// googleapi.DefaultUploadChunkSize.The Content-Type header used in the -// upload request will be determined by sniffing the contents of r, -// unless a MediaOption generated by googleapi.ContentType is -// supplied. -// At most one of Media and ResumableMedia may be set. -func (c *ObjectsInsertCall) Media(r io.Reader, options ...googleapi.MediaOption) *ObjectsInsertCall { - if ct := c.object.ContentType; ct != "" { - options = append([]googleapi.MediaOption{googleapi.ContentType(ct)}, options...) - } - c.mediaInfo_ = gensupport.NewInfoFromMedia(r, options) - return c -} - -// ResumableMedia specifies the media to upload in chunks and can be -// canceled with ctx. -// -// Deprecated: use Media instead. -// -// At most one of Media and ResumableMedia may be set. mediaType -// identifies the MIME media type of the upload, such as "image/png". If -// mediaType is "", it will be auto-detected. The provided ctx will -// supersede any context previously provided to the Context method. -func (c *ObjectsInsertCall) ResumableMedia(ctx context.Context, r io.ReaderAt, size int64, mediaType string) *ObjectsInsertCall { - c.ctx_ = ctx - c.mediaInfo_ = gensupport.NewInfoFromResumableMedia(r, size, mediaType) - return c -} - -// ProgressUpdater provides a callback function that will be called -// after every chunk. It should be a low-latency function in order to -// not slow down the upload operation. This should only be called when -// using ResumableMedia (as opposed to Media). -func (c *ObjectsInsertCall) ProgressUpdater(pu googleapi.ProgressUpdater) *ObjectsInsertCall { - c.mediaInfo_.SetProgressUpdater(pu) - return c -} - -// WithRetry causes the library to retry the initial request of the -// upload(for resumable uploads) or the entire upload (for multipart -// uploads) ifa transient error occurs. This is contingent on ChunkSize -// being > 0 (sothat the input data may be buffered). The backoff -// argument will be used todetermine exponential backoff timing, and the -// errorFunc is used to determinewhich errors are considered retryable. -// By default, exponetial backoff will beapplied using gax defaults, and -// the following errors are retried: -// -// - HTTP responses with codes 408, 429, 502, 503, and 504. -// -// - Transient network errors such as connection reset and -// io.ErrUnexpectedEOF. -// -// - Errors which are considered transient using the Temporary() -// interface. -// -// - Wrapped versions of these errors. -func (c *ObjectsInsertCall) WithRetry(bo *gax.Backoff, errorFunc func(err error) bool) *ObjectsInsertCall { - c.retry = &gensupport.RetryConfig{ - Backoff: bo, - ShouldRetry: errorFunc, - } - return c -} - -// Fields allows partial responses to be retrieved. 
See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ObjectsInsertCall) Fields(s ...googleapi.Field) *ObjectsInsertCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -// This context will supersede any context previously provided to the -// ResumableMedia method. -func (c *ObjectsInsertCall) Context(ctx context.Context) *ObjectsInsertCall { - c.ctx_ = ctx +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NotificationsInsertCall) Context(ctx context.Context) *NotificationsInsertCall { + c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ObjectsInsertCall) Header() http.Header { +func (c *NotificationsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *NotificationsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -9967,48 +9891,34 @@ func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.notification) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o") - if c.mediaInfo_ != nil { - urls = googleapi.ResolveRelative(c.s.BasePath, "/upload/storage/v1/b/{bucket}/o") - c.urlParams_.Set("uploadType", c.mediaInfo_.UploadType()) - } - if body == nil { - body = new(bytes.Buffer) - reqHeaders.Set("Content-Type", "application/json") - } - body, getBody, cleanup := c.mediaInfo_.UploadRequest(reqHeaders, body) - defer cleanup() + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders - req.GetBody = getBody googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) - if c.retry != nil { - return gensupport.SendRequestWithRetry(c.ctx_, c.s.client, req, c.retry) - } return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.objects.insert" call. -// Exactly one of *Object or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Object.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ObjectsInsertCall) Do(opts ...googleapi.CallOption) (*Object, error) { +// Do executes the "storage.notifications.insert" call. +// Exactly one of *Notification or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *Notification.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NotificationsInsertCall) Do(opts ...googleapi.CallOption) (*Notification, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -10027,25 +9937,7 @@ func (c *ObjectsInsertCall) Do(opts ...googleapi.CallOption) (*Object, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - rx := c.mediaInfo_.ResumableUpload(res.Header.Get("Location")) - if rx != nil { - rx.Client = c.s.client - rx.UserAgent = c.s.userAgent() - rx.Retry = c.retry - ctx := c.ctx_ - if ctx == nil { - ctx = context.TODO() - } - res, err = rx.Upload(ctx) - if err != nil { - return nil, err - } - defer res.Body.Close() - if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - } - ret := &Object{ + ret := &Notification{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -10057,133 +9949,44 @@ func (c *ObjectsInsertCall) Do(opts ...googleapi.CallOption) (*Object, error) { } return ret, nil // { - // "description": "Stores a new object and metadata.", + // "description": "Creates a notification subscription for a given bucket.", // "httpMethod": "POST", - // "id": "storage.objects.insert", - // "mediaUpload": { - // "accept": [ - // "*/*" - // ], - // "protocols": { - // "resumable": { - // "multipart": true, - // "path": "/resumable/upload/storage/v1/b/{bucket}/o" - // }, - // "simple": { - // "multipart": true, - // "path": "/upload/storage/v1/b/{bucket}/o" - // } - // } - // }, + // "id": "storage.notifications.insert", // "parameterOrder": [ // "bucket" // ], // "parameters": { // "bucket": { - // "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.", + // "description": "The parent bucket of the notification.", // "location": "path", // "required": true, // "type": "string" // }, - // "contentEncoding": { - // "description": "If set, sets the contentEncoding property of the final object to this value. Setting this parameter is equivalent to setting the contentEncoding metadata property. This can be useful when uploading an object with uploadType=media to indicate the encoding of the content being uploaded.", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. 
Setting to 0 makes the operation succeed only if there is a live version of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "kmsKeyName": { - // "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", - // "location": "query", - // "type": "string" - // }, - // "name": { - // "description": "Name of the object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", - // "location": "query", - // "type": "string" - // }, - // "predefinedAcl": { - // "description": "Apply a predefined set of access controls to this object.", - // "enum": [ - // "authenticatedRead", - // "bucketOwnerFullControl", - // "bucketOwnerRead", - // "private", - // "projectPrivate", - // "publicRead" - // ], - // "enumDescriptions": [ - // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - // "Object owner gets OWNER access, and project team owners get OWNER access.", - // "Object owner gets OWNER access, and project team owners get READER access.", - // "Object owner gets OWNER access.", - // "Object owner gets OWNER access, and project team members get access according to their roles.", - // "Object owner gets OWNER access, and allUsers get READER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit the owner, acl property." - // ], - // "location": "query", - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "userProject": { + // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", // "location": "query", // "type": "string" // } // }, - // "path": "b/{bucket}/o", + // "path": "b/{bucket}/notificationConfigs", // "request": { - // "$ref": "Object" + // "$ref": "Notification" // }, // "response": { - // "$ref": "Object" + // "$ref": "Notification" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control", // "https://www.googleapis.com/auth/devstorage.read_write" - // ], - // "supportsMediaUpload": true + // ] // } } -// method id "storage.objects.list": +// method id "storage.notifications.list": -type ObjectsListCall struct { +type NotificationsListCall struct { s *Service bucket string urlParams_ gensupport.URLParams @@ -10192,116 +9995,27 @@ type ObjectsListCall struct { header_ http.Header } -// List: Retrieves a list of objects matching the criteria. +// List: Retrieves a list of notification subscriptions for a given +// bucket. // -// - bucket: Name of the bucket in which to look for objects. -func (r *ObjectsService) List(bucket string) *ObjectsListCall { - c := &ObjectsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - bucket: Name of a Google Cloud Storage bucket. +func (r *NotificationsService) List(bucket string) *NotificationsListCall { + c := &NotificationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket return c } -// Delimiter sets the optional parameter "delimiter": Returns results in -// a directory-like mode. items will contain only objects whose names, -// aside from the prefix, do not contain delimiter. Objects whose names, -// aside from the prefix, contain delimiter will have their name, -// truncated after the delimiter, returned in prefixes. Duplicate -// prefixes are omitted. -func (c *ObjectsListCall) Delimiter(delimiter string) *ObjectsListCall { - c.urlParams_.Set("delimiter", delimiter) - return c -} - -// EndOffset sets the optional parameter "endOffset": Filter results to -// objects whose names are lexicographically before endOffset. If -// startOffset is also set, the objects listed will have names between -// startOffset (inclusive) and endOffset (exclusive). -func (c *ObjectsListCall) EndOffset(endOffset string) *ObjectsListCall { - c.urlParams_.Set("endOffset", endOffset) - return c -} - -// IncludeTrailingDelimiter sets the optional parameter -// "includeTrailingDelimiter": If true, objects that end in exactly one -// instance of delimiter will have their metadata included in items in -// addition to prefixes. -func (c *ObjectsListCall) IncludeTrailingDelimiter(includeTrailingDelimiter bool) *ObjectsListCall { - c.urlParams_.Set("includeTrailingDelimiter", fmt.Sprint(includeTrailingDelimiter)) - return c -} - -// MatchGlob sets the optional parameter "matchGlob": Filter results to -// objects and prefixes that match this glob pattern. -func (c *ObjectsListCall) MatchGlob(matchGlob string) *ObjectsListCall { - c.urlParams_.Set("matchGlob", matchGlob) - return c -} - -// MaxResults sets the optional parameter "maxResults": Maximum number -// of items plus prefixes to return in a single page of responses. As -// duplicate prefixes are omitted, fewer total results may be returned -// than requested. The service will use this parameter or 1,000 items, -// whichever is smaller. 
-func (c *ObjectsListCall) MaxResults(maxResults int64) *ObjectsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// PageToken sets the optional parameter "pageToken": A -// previously-returned page token representing part of the larger set of -// results to view. -func (c *ObjectsListCall) PageToken(pageToken string) *ObjectsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Prefix sets the optional parameter "prefix": Filter results to -// objects whose names begin with this prefix. -func (c *ObjectsListCall) Prefix(prefix string) *ObjectsListCall { - c.urlParams_.Set("prefix", prefix) - return c -} - -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to noAcl. -// -// Possible values: -// -// "full" - Include all properties. -// "noAcl" - Omit the owner, acl property. -func (c *ObjectsListCall) Projection(projection string) *ObjectsListCall { - c.urlParams_.Set("projection", projection) - return c -} - -// StartOffset sets the optional parameter "startOffset": Filter results -// to objects whose names are lexicographically equal to or after -// startOffset. If endOffset is also set, the objects listed will have -// names between startOffset (inclusive) and endOffset (exclusive). -func (c *ObjectsListCall) StartOffset(startOffset string) *ObjectsListCall { - c.urlParams_.Set("startOffset", startOffset) - return c -} - // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. -func (c *ObjectsListCall) UserProject(userProject string) *ObjectsListCall { +func (c *NotificationsListCall) UserProject(userProject string) *NotificationsListCall { c.urlParams_.Set("userProject", userProject) return c } -// Versions sets the optional parameter "versions": If true, lists all -// versions of an object as distinct results. The default is false. For -// more information, see Object Versioning. -func (c *ObjectsListCall) Versions(versions bool) *ObjectsListCall { - c.urlParams_.Set("versions", fmt.Sprint(versions)) - return c -} - // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ObjectsListCall) Fields(s ...googleapi.Field) *ObjectsListCall { +func (c *NotificationsListCall) Fields(s ...googleapi.Field) *NotificationsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -10311,7 +10025,7 @@ func (c *ObjectsListCall) Fields(s ...googleapi.Field) *ObjectsListCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *ObjectsListCall) IfNoneMatch(entityTag string) *ObjectsListCall { +func (c *NotificationsListCall) IfNoneMatch(entityTag string) *NotificationsListCall { c.ifNoneMatch_ = entityTag return c } @@ -10319,21 +10033,21 @@ func (c *ObjectsListCall) IfNoneMatch(entityTag string) *ObjectsListCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ObjectsListCall) Context(ctx context.Context) *ObjectsListCall { +func (c *NotificationsListCall) Context(ctx context.Context) *NotificationsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ObjectsListCall) Header() http.Header { +func (c *NotificationsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ObjectsListCall) doRequest(alt string) (*http.Response, error) { +func (c *NotificationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -10346,7 +10060,7 @@ func (c *ObjectsListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -10359,14 +10073,14 @@ func (c *ObjectsListCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.objects.list" call. -// Exactly one of *Objects or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Objects.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ObjectsListCall) Do(opts ...googleapi.CallOption) (*Objects, error) { +// Do executes the "storage.notifications.list" call. +// Exactly one of *Notifications or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Notifications.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *NotificationsListCall) Do(opts ...googleapi.CallOption) (*Notifications, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -10385,7 +10099,7 @@ func (c *ObjectsListCall) Do(opts ...googleapi.CallOption) (*Objects, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &Objects{ + ret := &Notifications{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -10397,89 +10111,28 @@ func (c *ObjectsListCall) Do(opts ...googleapi.CallOption) (*Objects, error) { } return ret, nil // { - // "description": "Retrieves a list of objects matching the criteria.", + // "description": "Retrieves a list of notification subscriptions for a given bucket.", // "httpMethod": "GET", - // "id": "storage.objects.list", + // "id": "storage.notifications.list", // "parameterOrder": [ // "bucket" // ], // "parameters": { // "bucket": { - // "description": "Name of the bucket in which to look for objects.", + // "description": "Name of a Google Cloud Storage bucket.", // "location": "path", // "required": true, // "type": "string" // }, - // "delimiter": { - // "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. 
Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.", - // "location": "query", - // "type": "string" - // }, - // "endOffset": { - // "description": "Filter results to objects whose names are lexicographically before endOffset. If startOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", - // "location": "query", - // "type": "string" - // }, - // "includeTrailingDelimiter": { - // "description": "If true, objects that end in exactly one instance of delimiter will have their metadata included in items in addition to prefixes.", - // "location": "query", - // "type": "boolean" - // }, - // "matchGlob": { - // "description": "Filter results to objects and prefixes that match this glob pattern.", - // "location": "query", - // "type": "string" - // }, - // "maxResults": { - // "default": "1000", - // "description": "Maximum number of items plus prefixes to return in a single page of responses. As duplicate prefixes are omitted, fewer total results may be returned than requested. The service will use this parameter or 1,000 items, whichever is smaller.", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "pageToken": { - // "description": "A previously-returned page token representing part of the larger set of results to view.", - // "location": "query", - // "type": "string" - // }, - // "prefix": { - // "description": "Filter results to objects whose names begin with this prefix.", + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. Defaults to noAcl.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit the owner, acl property." - // ], - // "location": "query", - // "type": "string" - // }, - // "startOffset": { - // "description": "Filter results to objects whose names are lexicographically equal to or after startOffset. If endOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", - // "location": "query", - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // }, - // "versions": { - // "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.", - // "location": "query", - // "type": "boolean" // } // }, - // "path": "b/{bucket}/o", + // "path": "b/{bucket}/notificationConfigs", // "response": { - // "$ref": "Objects" + // "$ref": "Notifications" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -10487,148 +10140,52 @@ func (c *ObjectsListCall) Do(opts ...googleapi.CallOption) (*Objects, error) { // "https://www.googleapis.com/auth/devstorage.full_control", // "https://www.googleapis.com/auth/devstorage.read_only", // "https://www.googleapis.com/auth/devstorage.read_write" - // ], - // "supportsSubscription": true + // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. 
-// The provided context supersedes any context provided to the Context method. -func (c *ObjectsListCall) Pages(ctx context.Context, f func(*Objects) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "storage.objects.patch": +// method id "storage.objectAccessControls.delete": -type ObjectsPatchCall struct { +type ObjectAccessControlsDeleteCall struct { s *Service bucket string object string - object2 *Object + entity string urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Patch: Patches an object's metadata. +// Delete: Permanently deletes the ACL entry for the specified entity on +// the specified object. // -// - bucket: Name of the bucket in which the object resides. +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. // - object: Name of the object. For information about how to URL encode // object names to be path safe, see Encoding URI Path Parts // (https://cloud.google.com/storage/docs/request-endpoints#encoding). -func (r *ObjectsService) Patch(bucket string, object string, object2 *Object) *ObjectsPatchCall { - c := &ObjectsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *ObjectAccessControlsService) Delete(bucket string, object string, entity string) *ObjectAccessControlsDeleteCall { + c := &ObjectAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket c.object = object - c.object2 = object2 + c.entity = entity return c } // Generation sets the optional parameter "generation": If present, // selects a specific revision of this object (as opposed to the latest // version, the default). -func (c *ObjectsPatchCall) Generation(generation int64) *ObjectsPatchCall { +func (c *ObjectAccessControlsDeleteCall) Generation(generation int64) *ObjectAccessControlsDeleteCall { c.urlParams_.Set("generation", fmt.Sprint(generation)) return c } -// IfGenerationMatch sets the optional parameter "ifGenerationMatch": -// Makes the operation conditional on whether the object's current -// generation matches the given value. Setting to 0 makes the operation -// succeed only if there are no live versions of the object. -func (c *ObjectsPatchCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsPatchCall { - c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) - return c -} - -// IfGenerationNotMatch sets the optional parameter -// "ifGenerationNotMatch": Makes the operation conditional on whether -// the object's current generation does not match the given value. If no -// live object exists, the precondition fails. Setting to 0 makes the -// operation succeed only if there is a live version of the object. -func (c *ObjectsPatchCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsPatchCall { - c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) - return c -} - -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the operation conditional on whether -// the object's current metageneration matches the given value. 
-func (c *ObjectsPatchCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsPatchCall { - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) - return c -} - -// IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": Makes the operation conditional on -// whether the object's current metageneration does not match the given -// value. -func (c *ObjectsPatchCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsPatchCall { - c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) - return c -} - -// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a -// predefined set of access controls to this object. -// -// Possible values: -// -// "authenticatedRead" - Object owner gets OWNER access, and -// -// allAuthenticatedUsers get READER access. -// -// "bucketOwnerFullControl" - Object owner gets OWNER access, and -// -// project team owners get OWNER access. -// -// "bucketOwnerRead" - Object owner gets OWNER access, and project -// -// team owners get READER access. -// -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team -// -// members get access according to their roles. -// -// "publicRead" - Object owner gets OWNER access, and allUsers get -// -// READER access. -func (c *ObjectsPatchCall) PredefinedAcl(predefinedAcl string) *ObjectsPatchCall { - c.urlParams_.Set("predefinedAcl", predefinedAcl) - return c -} - -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to full. -// -// Possible values: -// -// "full" - Include all properties. -// "noAcl" - Omit the owner, acl property. -func (c *ObjectsPatchCall) Projection(projection string) *ObjectsPatchCall { - c.urlParams_.Set("projection", projection) - return c -} - // UserProject sets the optional parameter "userProject": The project to -// be billed for this request, for Requester Pays buckets. -func (c *ObjectsPatchCall) UserProject(userProject string) *ObjectsPatchCall { +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectAccessControlsDeleteCall) UserProject(userProject string) *ObjectAccessControlsDeleteCall { c.urlParams_.Set("userProject", userProject) return c } @@ -10636,7 +10193,7 @@ func (c *ObjectsPatchCall) UserProject(userProject string) *ObjectsPatchCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ObjectsPatchCall) Fields(s ...googleapi.Field) *ObjectsPatchCall { +func (c *ObjectAccessControlsDeleteCall) Fields(s ...googleapi.Field) *ObjectAccessControlsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -10644,21 +10201,21 @@ func (c *ObjectsPatchCall) Fields(s ...googleapi.Field) *ObjectsPatchCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ObjectsPatchCall) Context(ctx context.Context) *ObjectsPatchCall { +func (c *ObjectAccessControlsDeleteCall) Context(ctx context.Context) *ObjectAccessControlsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ObjectsPatchCall) Header() http.Header { +func (c *ObjectAccessControlsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ObjectsPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *ObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -10666,16 +10223,11 @@ func (c *ObjectsPatchCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } @@ -10683,88 +10235,47 @@ func (c *ObjectsPatchCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "object": c.object, + "entity": c.entity, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.objects.patch" call. -// Exactly one of *Object or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Object.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ObjectsPatchCall) Do(opts ...googleapi.CallOption) (*Object, error) { +// Do executes the "storage.objectAccessControls.delete" call. +func (c *ObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } if err != nil { - return nil, err + return err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &Object{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err + return gensupport.WrapError(err) } - return ret, nil + return nil // { - // "description": "Patches an object's metadata.", - // "httpMethod": "PATCH", - // "id": "storage.objects.patch", + // "description": "Permanently deletes the ACL entry for the specified entity on the specified object.", + // "httpMethod": "DELETE", + // "id": "storage.objectAccessControls.delete", // "parameterOrder": [ // "bucket", - // "object" + // "object", + // "entity" // ], // "parameters": { // "bucket": { - // "description": "Name of the bucket in which the object resides.", + // "description": "Name of a bucket.", // "location": "path", // "required": true, // "type": "string" // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", + // "entity": { + // "description": "The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, // "type": "string" // }, - // "ifMetagenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", // "format": "int64", // "location": "query", // "type": "string" @@ -10775,53 +10286,13 @@ func (c *ObjectsPatchCall) Do(opts ...googleapi.CallOption) (*Object, error) { // "required": true, // "type": "string" // }, - // "predefinedAcl": { - // "description": "Apply a predefined set of access controls to this object.", - // "enum": [ - // "authenticatedRead", - // "bucketOwnerFullControl", - // "bucketOwnerRead", - // "private", - // "projectPrivate", - // "publicRead" - // ], - // "enumDescriptions": [ - // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - // "Object owner gets OWNER access, and project team owners get OWNER access.", - // "Object owner gets OWNER access, and project team owners get READER access.", - // "Object owner gets OWNER access.", - // "Object owner gets OWNER access, and project team members get access according to their roles.", - // "Object owner gets OWNER access, and allUsers get READER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. Defaults to full.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit the owner, acl property." - // ], - // "location": "query", - // "type": "string" - // }, // "userProject": { - // "description": "The project to be billed for this request, for Requester Pays buckets.", + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", // "type": "string" // } // }, - // "path": "b/{bucket}/o/{object}", - // "request": { - // "$ref": "Object" - // }, - // "response": { - // "$ref": "Object" - // }, + // "path": "b/{bucket}/o/{object}/acl/{entity}", // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control" @@ -10830,167 +10301,4069 @@ func (c *ObjectsPatchCall) Do(opts ...googleapi.CallOption) (*Object, error) { } -// method id "storage.objects.rewrite": +// method id "storage.objectAccessControls.get": -type ObjectsRewriteCall struct { - s *Service - sourceBucket string - sourceObject string - destinationBucket string - destinationObject string - object *Object - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ObjectAccessControlsGetCall struct { + s *Service + bucket string + object string + entity string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Rewrite: Rewrites a source object to a destination object. Optionally -// overrides metadata. +// Get: Returns the ACL entry for the specified entity on the specified +// object. // -// - destinationBucket: Name of the bucket in which to store the new -// object. Overrides the provided object metadata's bucket value, if -// any. -// - destinationObject: Name of the new object. Required when the object -// metadata is not otherwise provided. 
Overrides the object metadata's -// name value, if any. For information about how to URL encode object -// names to be path safe, see Encoding URI Path Parts -// (https://cloud.google.com/storage/docs/request-endpoints#encoding). -// - sourceBucket: Name of the bucket in which to find the source -// object. -// - sourceObject: Name of the source object. For information about how -// to URL encode object names to be path safe, see Encoding URI Path -// Parts +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts // (https://cloud.google.com/storage/docs/request-endpoints#encoding). -func (r *ObjectsService) Rewrite(sourceBucket string, sourceObject string, destinationBucket string, destinationObject string, object *Object) *ObjectsRewriteCall { - c := &ObjectsRewriteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.sourceBucket = sourceBucket - c.sourceObject = sourceObject - c.destinationBucket = destinationBucket - c.destinationObject = destinationObject +func (r *ObjectAccessControlsService) Get(bucket string, object string, entity string) *ObjectAccessControlsGetCall { + c := &ObjectAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket c.object = object + c.entity = entity return c } -// DestinationKmsKeyName sets the optional parameter -// "destinationKmsKeyName": Resource name of the Cloud KMS key, of the -// form -// projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, -// -// that will be used to encrypt the object. Overrides the object -// -// metadata's kms_key_name value, if any. -func (c *ObjectsRewriteCall) DestinationKmsKeyName(destinationKmsKeyName string) *ObjectsRewriteCall { - c.urlParams_.Set("destinationKmsKeyName", destinationKmsKeyName) - return c -} - -// DestinationPredefinedAcl sets the optional parameter -// "destinationPredefinedAcl": Apply a predefined set of access controls -// to the destination object. -// -// Possible values: -// -// "authenticatedRead" - Object owner gets OWNER access, and -// -// allAuthenticatedUsers get READER access. -// -// "bucketOwnerFullControl" - Object owner gets OWNER access, and -// -// project team owners get OWNER access. -// -// "bucketOwnerRead" - Object owner gets OWNER access, and project -// -// team owners get READER access. -// -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team -// -// members get access according to their roles. -// -// "publicRead" - Object owner gets OWNER access, and allUsers get -// -// READER access. -func (c *ObjectsRewriteCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsRewriteCall { - c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl) - return c -} - -// IfGenerationMatch sets the optional parameter "ifGenerationMatch": -// Makes the operation conditional on whether the object's current -// generation matches the given value. Setting to 0 makes the operation -// succeed only if there are no live versions of the object. 
-func (c *ObjectsRewriteCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsRewriteCall { - c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) - return c -} - -// IfGenerationNotMatch sets the optional parameter -// "ifGenerationNotMatch": Makes the operation conditional on whether -// the object's current generation does not match the given value. If no -// live object exists, the precondition fails. Setting to 0 makes the -// operation succeed only if there is a live version of the object. -func (c *ObjectsRewriteCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsRewriteCall { - c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectAccessControlsGetCall) Generation(generation int64) *ObjectAccessControlsGetCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) return c } -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the operation conditional on whether -// the destination object's current metageneration matches the given -// value. -func (c *ObjectsRewriteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsRewriteCall { - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectAccessControlsGetCall) UserProject(userProject string) *ObjectAccessControlsGetCall { + c.urlParams_.Set("userProject", userProject) return c } -// IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": Makes the operation conditional on -// whether the destination object's current metageneration does not -// match the given value. -func (c *ObjectsRewriteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsRewriteCall { - c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectAccessControlsGetCall) Fields(s ...googleapi.Field) *ObjectAccessControlsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfSourceGenerationMatch sets the optional parameter -// "ifSourceGenerationMatch": Makes the operation conditional on whether -// the source object's current generation matches the given value. -func (c *ObjectsRewriteCall) IfSourceGenerationMatch(ifSourceGenerationMatch int64) *ObjectsRewriteCall { - c.urlParams_.Set("ifSourceGenerationMatch", fmt.Sprint(ifSourceGenerationMatch)) +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ObjectAccessControlsGetCall) IfNoneMatch(entityTag string) *ObjectAccessControlsGetCall { + c.ifNoneMatch_ = entityTag return c } -// IfSourceGenerationNotMatch sets the optional parameter -// "ifSourceGenerationNotMatch": Makes the operation conditional on -// whether the source object's current generation does not match the -// given value. 
-func (c *ObjectsRewriteCall) IfSourceGenerationNotMatch(ifSourceGenerationNotMatch int64) *ObjectsRewriteCall { - c.urlParams_.Set("ifSourceGenerationNotMatch", fmt.Sprint(ifSourceGenerationNotMatch)) +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectAccessControlsGetCall) Context(ctx context.Context) *ObjectAccessControlsGetCall { + c.ctx_ = ctx return c } -// IfSourceMetagenerationMatch sets the optional parameter -// "ifSourceMetagenerationMatch": Makes the operation conditional on -// whether the source object's current metageneration matches the given -// value. -func (c *ObjectsRewriteCall) IfSourceMetagenerationMatch(ifSourceMetagenerationMatch int64) *ObjectsRewriteCall { - c.urlParams_.Set("ifSourceMetagenerationMatch", fmt.Sprint(ifSourceMetagenerationMatch)) - return c +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ObjectAccessControlsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ } -// IfSourceMetagenerationNotMatch sets the optional parameter -// "ifSourceMetagenerationNotMatch": Makes the operation conditional on -// whether the source object's current metageneration does not match the -// given value. -func (c *ObjectsRewriteCall) IfSourceMetagenerationNotMatch(ifSourceMetagenerationNotMatch int64) *ObjectsRewriteCall { - c.urlParams_.Set("ifSourceMetagenerationNotMatch", fmt.Sprint(ifSourceMetagenerationNotMatch)) - return c +func (c *ObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + "entity": c.entity, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// MaxBytesRewrittenPerCall sets the optional parameter -// "maxBytesRewrittenPerCall": The maximum number of bytes that will be -// rewritten per rewrite request. Most callers shouldn't need to specify -// this parameter - it is primarily in place to support testing. If +// Do executes the "storage.objectAccessControls.get" call. +// Exactly one of *ObjectAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &ObjectAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the ACL entry for the specified entity on the specified object.", + // "httpMethod": "GET", + // "id": "storage.objectAccessControls.get", + // "parameterOrder": [ + // "bucket", + // "object", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}/acl/{entity}", + // "response": { + // "$ref": "ObjectAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.objectAccessControls.insert": + +type ObjectAccessControlsInsertCall struct { + s *Service + bucket string + object string + objectaccesscontrol *ObjectAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a new ACL entry on the specified object. +// +// - bucket: Name of a bucket. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). +func (r *ObjectAccessControlsService) Insert(bucket string, object string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsInsertCall { + c := &ObjectAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.objectaccesscontrol = objectaccesscontrol + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). 
+func (c *ObjectAccessControlsInsertCall) Generation(generation int64) *ObjectAccessControlsInsertCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectAccessControlsInsertCall) UserProject(userProject string) *ObjectAccessControlsInsertCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectAccessControlsInsertCall) Fields(s ...googleapi.Field) *ObjectAccessControlsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectAccessControlsInsertCall) Context(ctx context.Context) *ObjectAccessControlsInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ObjectAccessControlsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objectAccessControls.insert" call. +// Exactly one of *ObjectAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &ObjectAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a new ACL entry on the specified object.", + // "httpMethod": "POST", + // "id": "storage.objectAccessControls.insert", + // "parameterOrder": [ + // "bucket", + // "object" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}/acl", + // "request": { + // "$ref": "ObjectAccessControl" + // }, + // "response": { + // "$ref": "ObjectAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.objectAccessControls.list": + +type ObjectAccessControlsListCall struct { + s *Service + bucket string + object string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves ACL entries on the specified object. +// +// - bucket: Name of a bucket. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). +func (r *ObjectAccessControlsService) List(bucket string, object string) *ObjectAccessControlsListCall { + c := &ObjectAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectAccessControlsListCall) Generation(generation int64) *ObjectAccessControlsListCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. 
+func (c *ObjectAccessControlsListCall) UserProject(userProject string) *ObjectAccessControlsListCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectAccessControlsListCall) Fields(s ...googleapi.Field) *ObjectAccessControlsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ObjectAccessControlsListCall) IfNoneMatch(entityTag string) *ObjectAccessControlsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectAccessControlsListCall) Context(ctx context.Context) *ObjectAccessControlsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ObjectAccessControlsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objectAccessControls.list" call. +// Exactly one of *ObjectAccessControls or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ObjectAccessControls.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControls, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &ObjectAccessControls{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves ACL entries on the specified object.", + // "httpMethod": "GET", + // "id": "storage.objectAccessControls.list", + // "parameterOrder": [ + // "bucket", + // "object" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}/acl", + // "response": { + // "$ref": "ObjectAccessControls" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.objectAccessControls.patch": + +type ObjectAccessControlsPatchCall struct { + s *Service + bucket string + object string + entity string + objectaccesscontrol *ObjectAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Patches an ACL entry on the specified object. +// +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). +func (r *ObjectAccessControlsService) Patch(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsPatchCall { + c := &ObjectAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.entity = entity + c.objectaccesscontrol = objectaccesscontrol + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). 
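For orientation, a minimal sketch of how the generated ObjectAccessControls Get and List calls above can be used. It assumes Application Default Credentials are available; the bucket, object, and entity names are placeholders, not values taken from this patch.

package main

import (
	"context"
	"fmt"
	"log"

	storage "google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()

	// NewService builds the generated *storage.Service using default credentials.
	svc, err := storage.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Fetch one ACL entry for an object; Context aborts the call if ctx is canceled.
	acl, err := svc.ObjectAccessControls.Get("my-bucket", "my-object", "allUsers").Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(acl.Entity, acl.Role)

	// List every ACL entry on the same object.
	acls, err := svc.ObjectAccessControls.List("my-bucket", "my-object").Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	for _, item := range acls.Items {
		fmt.Println(item.Entity, item.Role)
	}
}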
+func (c *ObjectAccessControlsPatchCall) Generation(generation int64) *ObjectAccessControlsPatchCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectAccessControlsPatchCall) UserProject(userProject string) *ObjectAccessControlsPatchCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectAccessControlsPatchCall) Fields(s ...googleapi.Field) *ObjectAccessControlsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectAccessControlsPatchCall) Context(ctx context.Context) *ObjectAccessControlsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ObjectAccessControlsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + "entity": c.entity, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objectAccessControls.patch" call. +// Exactly one of *ObjectAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &ObjectAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Patches an ACL entry on the specified object.", + // "httpMethod": "PATCH", + // "id": "storage.objectAccessControls.patch", + // "parameterOrder": [ + // "bucket", + // "object", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}/acl/{entity}", + // "request": { + // "$ref": "ObjectAccessControl" + // }, + // "response": { + // "$ref": "ObjectAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.objectAccessControls.update": + +type ObjectAccessControlsUpdateCall struct { + s *Service + bucket string + object string + entity string + objectaccesscontrol *ObjectAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates an ACL entry on the specified object. +// +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). 
+func (r *ObjectAccessControlsService) Update(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsUpdateCall { + c := &ObjectAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.entity = entity + c.objectaccesscontrol = objectaccesscontrol + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectAccessControlsUpdateCall) Generation(generation int64) *ObjectAccessControlsUpdateCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectAccessControlsUpdateCall) UserProject(userProject string) *ObjectAccessControlsUpdateCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectAccessControlsUpdateCall) Fields(s ...googleapi.Field) *ObjectAccessControlsUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectAccessControlsUpdateCall) Context(ctx context.Context) *ObjectAccessControlsUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ObjectAccessControlsUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PUT", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + "entity": c.entity, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objectAccessControls.update" call. +// Exactly one of *ObjectAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &ObjectAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an ACL entry on the specified object.", + // "httpMethod": "PUT", + // "id": "storage.objectAccessControls.update", + // "parameterOrder": [ + // "bucket", + // "object", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}/acl/{entity}", + // "request": { + // "$ref": "ObjectAccessControl" + // }, + // "response": { + // "$ref": "ObjectAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.objects.bulkRestore": + +type ObjectsBulkRestoreCall struct { + s *Service + bucket string + bulkrestoreobjectsrequest *BulkRestoreObjectsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// BulkRestore: Initiates a long-running bulk restore operation on the +// specified bucket. +// +// - bucket: Name of the bucket in which the object resides. +func (r *ObjectsService) BulkRestore(bucket string, bulkrestoreobjectsrequest *BulkRestoreObjectsRequest) *ObjectsBulkRestoreCall { + c := &ObjectsBulkRestoreCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.bulkrestoreobjectsrequest = bulkrestoreobjectsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsBulkRestoreCall) Fields(s ...googleapi.Field) *ObjectsBulkRestoreCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. 
Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectsBulkRestoreCall) Context(ctx context.Context) *ObjectsBulkRestoreCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ObjectsBulkRestoreCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsBulkRestoreCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bulkrestoreobjectsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/bulkRestore") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.bulkRestore" call. +// Exactly one of *GoogleLongrunningOperation or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *GoogleLongrunningOperation.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ObjectsBulkRestoreCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunningOperation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &GoogleLongrunningOperation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Initiates a long-running bulk restore operation on the specified bucket.", + // "httpMethod": "POST", + // "id": "storage.objects.bulkRestore", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which the object resides.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/bulkRestore", + // "request": { + // "$ref": "BulkRestoreObjectsRequest" + // }, + // "response": { + // "$ref": "GoogleLongrunningOperation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.objects.compose": + +type ObjectsComposeCall struct { + s *Service + destinationBucket string + destinationObject string + composerequest *ComposeRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Compose: Concatenates a list of existing objects into a new object in +// the same bucket. +// +// - destinationBucket: Name of the bucket containing the source +// objects. The destination object is stored in this bucket. +// - destinationObject: Name of the new object. For information about +// how to URL encode object names to be path safe, see Encoding URI +// Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). +func (r *ObjectsService) Compose(destinationBucket string, destinationObject string, composerequest *ComposeRequest) *ObjectsComposeCall { + c := &ObjectsComposeCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.destinationBucket = destinationBucket + c.destinationObject = destinationObject + c.composerequest = composerequest + return c +} + +// DestinationPredefinedAcl sets the optional parameter +// "destinationPredefinedAcl": Apply a predefined set of access controls +// to the destination object. +// +// Possible values: +// +// "authenticatedRead" - Object owner gets OWNER access, and +// +// allAuthenticatedUsers get READER access. +// +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// +// project team owners get OWNER access. +// +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// +// team owners get READER access. +// +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// +// members get access according to their roles. +// +// "publicRead" - Object owner gets OWNER access, and allUsers get +// +// READER access. 
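A short sketch of the generated ObjectsService.Compose call introduced above, concatenating two source objects into one destination object. It reuses the svc and ctx from the earlier sketch; the bucket and object names ("my-bucket", "part-000", "part-001", "combined") are placeholders.

// composeExample concatenates two existing objects into a new object in the
// same bucket using the generated Compose call.
func composeExample(ctx context.Context, svc *storage.Service) (*storage.Object, error) {
	req := &storage.ComposeRequest{
		SourceObjects: []*storage.ComposeRequestSourceObjects{
			{Name: "part-000"},
			{Name: "part-001"},
		},
	}
	// IfGenerationMatch(0) makes the compose succeed only if the destination
	// object does not already exist.
	return svc.Objects.Compose("my-bucket", "combined", req).
		IfGenerationMatch(0).
		Context(ctx).
		Do()
}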
+func (c *ObjectsComposeCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsComposeCall { + c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the object's current +// generation matches the given value. Setting to 0 makes the operation +// succeed only if there are no live versions of the object. +func (c *ObjectsComposeCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsComposeCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the object's current metageneration matches the given value. +func (c *ObjectsComposeCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsComposeCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// KmsKeyName sets the optional parameter "kmsKeyName": Resource name of +// the Cloud KMS key, of the form +// projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, +// +// that will be used to encrypt the object. Overrides the object +// +// metadata's kms_key_name value, if any. +func (c *ObjectsComposeCall) KmsKeyName(kmsKeyName string) *ObjectsComposeCall { + c.urlParams_.Set("kmsKeyName", kmsKeyName) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectsComposeCall) UserProject(userProject string) *ObjectsComposeCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsComposeCall) Fields(s ...googleapi.Field) *ObjectsComposeCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectsComposeCall) Context(ctx context.Context) *ObjectsComposeCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ObjectsComposeCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsComposeCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.composerequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{destinationBucket}/o/{destinationObject}/compose") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "destinationBucket": c.destinationBucket, + "destinationObject": c.destinationObject, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.compose" call. +// Exactly one of *Object or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Object.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ObjectsComposeCall) Do(opts ...googleapi.CallOption) (*Object, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Object{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Concatenates a list of existing objects into a new object in the same bucket.", + // "httpMethod": "POST", + // "id": "storage.objects.compose", + // "parameterOrder": [ + // "destinationBucket", + // "destinationObject" + // ], + // "parameters": { + // "destinationBucket": { + // "description": "Name of the bucket containing the source objects. The destination object is stored in this bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "destinationObject": { + // "description": "Name of the new object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "destinationPredefinedAcl": { + // "description": "Apply a predefined set of access controls to the destination object.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current generation matches the given value. 
Setting to 0 makes the operation succeed only if there are no live versions of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "kmsKeyName": { + // "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{destinationBucket}/o/{destinationObject}/compose", + // "request": { + // "$ref": "ComposeRequest" + // }, + // "response": { + // "$ref": "Object" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.objects.copy": + +type ObjectsCopyCall struct { + s *Service + sourceBucket string + sourceObject string + destinationBucket string + destinationObject string + object *Object + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Copy: Copies a source object to a destination object. Optionally +// overrides metadata. +// +// - destinationBucket: Name of the bucket in which to store the new +// object. Overrides the provided object metadata's bucket value, if +// any.For information about how to URL encode object names to be path +// safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). +// - destinationObject: Name of the new object. Required when the object +// metadata is not otherwise provided. Overrides the object metadata's +// name value, if any. +// - sourceBucket: Name of the bucket in which to find the source +// object. +// - sourceObject: Name of the source object. For information about how +// to URL encode object names to be path safe, see Encoding URI Path +// Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). +func (r *ObjectsService) Copy(sourceBucket string, sourceObject string, destinationBucket string, destinationObject string, object *Object) *ObjectsCopyCall { + c := &ObjectsCopyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.sourceBucket = sourceBucket + c.sourceObject = sourceObject + c.destinationBucket = destinationBucket + c.destinationObject = destinationObject + c.object = object + return c +} + +// DestinationKmsKeyName sets the optional parameter +// "destinationKmsKeyName": Resource name of the Cloud KMS key, of the +// form +// projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, +// +// that will be used to encrypt the object. Overrides the object +// +// metadata's kms_key_name value, if any. 
+func (c *ObjectsCopyCall) DestinationKmsKeyName(destinationKmsKeyName string) *ObjectsCopyCall { + c.urlParams_.Set("destinationKmsKeyName", destinationKmsKeyName) + return c +} + +// DestinationPredefinedAcl sets the optional parameter +// "destinationPredefinedAcl": Apply a predefined set of access controls +// to the destination object. +// +// Possible values: +// +// "authenticatedRead" - Object owner gets OWNER access, and +// +// allAuthenticatedUsers get READER access. +// +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// +// project team owners get OWNER access. +// +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// +// team owners get READER access. +// +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// +// members get access according to their roles. +// +// "publicRead" - Object owner gets OWNER access, and allUsers get +// +// READER access. +func (c *ObjectsCopyCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsCopyCall { + c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the destination object's +// current generation matches the given value. Setting to 0 makes the +// operation succeed only if there are no live versions of the object. +func (c *ObjectsCopyCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfGenerationNotMatch sets the optional parameter +// "ifGenerationNotMatch": Makes the operation conditional on whether +// the destination object's current generation does not match the given +// value. If no live object exists, the precondition fails. Setting to 0 +// makes the operation succeed only if there is a live version of the +// object. +func (c *ObjectsCopyCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the destination object's current metageneration matches the given +// value. +func (c *ObjectsCopyCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on +// whether the destination object's current metageneration does not +// match the given value. +func (c *ObjectsCopyCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// IfSourceGenerationMatch sets the optional parameter +// "ifSourceGenerationMatch": Makes the operation conditional on whether +// the source object's current generation matches the given value. 
+func (c *ObjectsCopyCall) IfSourceGenerationMatch(ifSourceGenerationMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifSourceGenerationMatch", fmt.Sprint(ifSourceGenerationMatch)) + return c +} + +// IfSourceGenerationNotMatch sets the optional parameter +// "ifSourceGenerationNotMatch": Makes the operation conditional on +// whether the source object's current generation does not match the +// given value. +func (c *ObjectsCopyCall) IfSourceGenerationNotMatch(ifSourceGenerationNotMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifSourceGenerationNotMatch", fmt.Sprint(ifSourceGenerationNotMatch)) + return c +} + +// IfSourceMetagenerationMatch sets the optional parameter +// "ifSourceMetagenerationMatch": Makes the operation conditional on +// whether the source object's current metageneration matches the given +// value. +func (c *ObjectsCopyCall) IfSourceMetagenerationMatch(ifSourceMetagenerationMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifSourceMetagenerationMatch", fmt.Sprint(ifSourceMetagenerationMatch)) + return c +} + +// IfSourceMetagenerationNotMatch sets the optional parameter +// "ifSourceMetagenerationNotMatch": Makes the operation conditional on +// whether the source object's current metageneration does not match the +// given value. +func (c *ObjectsCopyCall) IfSourceMetagenerationNotMatch(ifSourceMetagenerationNotMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifSourceMetagenerationNotMatch", fmt.Sprint(ifSourceMetagenerationNotMatch)) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to noAcl, unless the object resource +// specifies the acl property, when it defaults to full. +// +// Possible values: +// +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. +func (c *ObjectsCopyCall) Projection(projection string) *ObjectsCopyCall { + c.urlParams_.Set("projection", projection) + return c +} + +// SourceGeneration sets the optional parameter "sourceGeneration": If +// present, selects a specific revision of the source object (as opposed +// to the latest version, the default). +func (c *ObjectsCopyCall) SourceGeneration(sourceGeneration int64) *ObjectsCopyCall { + c.urlParams_.Set("sourceGeneration", fmt.Sprint(sourceGeneration)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectsCopyCall) UserProject(userProject string) *ObjectsCopyCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsCopyCall) Fields(s ...googleapi.Field) *ObjectsCopyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectsCopyCall) Context(ctx context.Context) *ObjectsCopyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ObjectsCopyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsCopyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "sourceBucket": c.sourceBucket, + "sourceObject": c.sourceObject, + "destinationBucket": c.destinationBucket, + "destinationObject": c.destinationObject, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.copy" call. +// Exactly one of *Object or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Object.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ObjectsCopyCall) Do(opts ...googleapi.CallOption) (*Object, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Object{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Copies a source object to a destination object. Optionally overrides metadata.", + // "httpMethod": "POST", + // "id": "storage.objects.copy", + // "parameterOrder": [ + // "sourceBucket", + // "sourceObject", + // "destinationBucket", + // "destinationObject" + // ], + // "parameters": { + // "destinationBucket": { + // "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "destinationKmsKeyName": { + // "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. 
Overrides the object metadata's kms_key_name value, if any.", + // "location": "query", + // "type": "string" + // }, + // "destinationObject": { + // "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "destinationPredefinedAcl": { + // "description": "Apply a predefined set of access controls to the destination object.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the destination object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the destination object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifSourceGenerationMatch": { + // "description": "Makes the operation conditional on whether the source object's current generation matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifSourceGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the source object's current generation does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifSourceMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifSourceMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. 
Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the owner, acl property." + // ], + // "location": "query", + // "type": "string" + // }, + // "sourceBucket": { + // "description": "Name of the bucket in which to find the source object.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "sourceGeneration": { + // "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "sourceObject": { + // "description": "Name of the source object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}", + // "request": { + // "$ref": "Object" + // }, + // "response": { + // "$ref": "Object" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.objects.delete": + +type ObjectsDeleteCall struct { + s *Service + bucket string + object string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes an object and its metadata. Deletions are permanent +// if versioning is not enabled for the bucket, or if the generation +// parameter is used. +// +// - bucket: Name of the bucket in which the object resides. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). +func (r *ObjectsService) Delete(bucket string, object string) *ObjectsDeleteCall { + c := &ObjectsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + return c +} + +// Generation sets the optional parameter "generation": If present, +// permanently deletes a specific revision of this object (as opposed to +// the latest version, the default). +func (c *ObjectsDeleteCall) Generation(generation int64) *ObjectsDeleteCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the object's current +// generation matches the given value. Setting to 0 makes the operation +// succeed only if there are no live versions of the object. +func (c *ObjectsDeleteCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsDeleteCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfGenerationNotMatch sets the optional parameter +// "ifGenerationNotMatch": Makes the operation conditional on whether +// the object's current generation does not match the given value. If no +// live object exists, the precondition fails. 
Setting to 0 makes the +// operation succeed only if there is a live version of the object. +func (c *ObjectsDeleteCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsDeleteCall { + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the object's current metageneration matches the given value. +func (c *ObjectsDeleteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsDeleteCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on +// whether the object's current metageneration does not match the given +// value. +func (c *ObjectsDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsDeleteCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectsDeleteCall) UserProject(userProject string) *ObjectsDeleteCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsDeleteCall) Fields(s ...googleapi.Field) *ObjectsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectsDeleteCall) Context(ctx context.Context) *ObjectsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ObjectsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.delete" call. +func (c *ObjectsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return gensupport.WrapError(err) + } + return nil + // { + // "description": "Deletes an object and its metadata. 
Deletions are permanent if versioning is not enabled for the bucket, or if the generation parameter is used.", + // "httpMethod": "DELETE", + // "id": "storage.objects.delete", + // "parameterOrder": [ + // "bucket", + // "object" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which the object resides.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, permanently deletes a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}", + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.objects.get": + +type ObjectsGetCall struct { + s *Service + bucket string + object string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Retrieves an object or its metadata. +// +// - bucket: Name of the bucket in which the object resides. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). +func (r *ObjectsService) Get(bucket string, object string) *ObjectsGetCall { + c := &ObjectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). 
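// --- Editorial sketch (not part of this patch or the vendored, generated
// file): a minimal, hypothetical example of issuing storage.objects.delete
// through this generated client. It assumes the caller has already built a
// *Service (for example with NewService) and supplies real bucket/object
// names; the helper name is illustrative only.
func exampleObjectsDelete(ctx context.Context, svc *Service, bucket, object string) error {
	// Build the call, attach the context, and execute it. Do returns only an
	// error because storage.objects.delete has no response body; the deletion
	// is permanent unless bucket versioning retains a noncurrent copy.
	return svc.Objects.Delete(bucket, object).Context(ctx).Do()
}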
+func (c *ObjectsGetCall) Generation(generation int64) *ObjectsGetCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the object's current +// generation matches the given value. Setting to 0 makes the operation +// succeed only if there are no live versions of the object. +func (c *ObjectsGetCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsGetCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfGenerationNotMatch sets the optional parameter +// "ifGenerationNotMatch": Makes the operation conditional on whether +// the object's current generation does not match the given value. If no +// live object exists, the precondition fails. Setting to 0 makes the +// operation succeed only if there is a live version of the object. +func (c *ObjectsGetCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsGetCall { + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the object's current metageneration matches the given value. +func (c *ObjectsGetCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsGetCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on +// whether the object's current metageneration does not match the given +// value. +func (c *ObjectsGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsGetCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to noAcl. +// +// Possible values: +// +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. +func (c *ObjectsGetCall) Projection(projection string) *ObjectsGetCall { + c.urlParams_.Set("projection", projection) + return c +} + +// SoftDeleted sets the optional parameter "softDeleted": If true, only +// soft-deleted object versions will be listed. The default is false. +// For more information, see Soft Delete. +func (c *ObjectsGetCall) SoftDeleted(softDeleted bool) *ObjectsGetCall { + c.urlParams_.Set("softDeleted", fmt.Sprint(softDeleted)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectsGetCall) UserProject(userProject string) *ObjectsGetCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsGetCall) Fields(s ...googleapi.Field) *ObjectsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
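// --- Editorial sketch (not part of this patch or the vendored, generated
// file): a hypothetical example of the conditional-get pattern described in
// the IfNoneMatch comment above. It assumes svc, bucket, object and a
// previously observed etag are supplied by the caller.
func exampleObjectsGetIfNoneMatch(ctx context.Context, svc *Service, bucket, object, etag string) (*Object, error) {
	meta, err := svc.Objects.Get(bucket, object).IfNoneMatch(etag).Context(ctx).Do()
	if googleapi.IsNotModified(err) {
		// The copy identified by etag is still current; no new metadata.
		return nil, nil
	}
	return meta, err
}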
+func (c *ObjectsGetCall) IfNoneMatch(entityTag string) *ObjectsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do and Download +// methods. Any pending HTTP request will be aborted if the provided +// context is canceled. +func (c *ObjectsGetCall) Context(ctx context.Context) *ObjectsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ObjectsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Download fetches the API endpoint's "media" value, instead of the normal +// API response value. If the returned error is nil, the Response is guaranteed to +// have a 2xx status code. Callers must close the Response.Body as usual. +func (c *ObjectsGetCall) Download(opts ...googleapi.CallOption) (*http.Response, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("media") + if err != nil { + return nil, err + } + if err := googleapi.CheckMediaResponse(res); err != nil { + res.Body.Close() + return nil, gensupport.WrapError(err) + } + return res, nil +} + +// Do executes the "storage.objects.get" call. +// Exactly one of *Object or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Object.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ObjectsGetCall) Do(opts ...googleapi.CallOption) (*Object, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Object{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves an object or its metadata.", + // "httpMethod": "GET", + // "id": "storage.objects.get", + // "parameterOrder": [ + // "bucket", + // "object" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which the object resides.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to noAcl.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the owner, acl property." + // ], + // "location": "query", + // "type": "string" + // }, + // "softDeleted": { + // "description": "If true, only soft-deleted object versions will be listed. The default is false. For more information, see Soft Delete.", + // "location": "query", + // "type": "boolean" + // }, + // "userProject": { + // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}", + // "response": { + // "$ref": "Object" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ], + // "supportsMediaDownload": true, + // "useMediaDownloadService": true + // } + +} + +// method id "storage.objects.getIamPolicy": + +type ObjectsGetIamPolicyCall struct { + s *Service + bucket string + object string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetIamPolicy: Returns an IAM policy for the specified object. +// +// - bucket: Name of the bucket in which the object resides. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). +func (r *ObjectsService) GetIamPolicy(bucket string, object string) *ObjectsGetIamPolicyCall { + c := &ObjectsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectsGetIamPolicyCall) Generation(generation int64) *ObjectsGetIamPolicyCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectsGetIamPolicyCall) UserProject(userProject string) *ObjectsGetIamPolicyCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsGetIamPolicyCall) Fields(s ...googleapi.Field) *ObjectsGetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ObjectsGetIamPolicyCall) IfNoneMatch(entityTag string) *ObjectsGetIamPolicyCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectsGetIamPolicyCall) Context(ctx context.Context) *ObjectsGetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
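// --- Editorial sketch (not part of this patch or the vendored, generated
// file): a hypothetical example of storage.objects.get, contrasting Do
// (which returns JSON metadata) with Download (which returns the media).
// Assumes svc, bucket and object are supplied by the caller.
func exampleObjectsGet(ctx context.Context, svc *Service, bucket, object string) error {
	// Do decodes the object's metadata into an *Object.
	meta, err := svc.Objects.Get(bucket, object).Context(ctx).Do()
	if err != nil {
		return err
	}
	fmt.Printf("object %s has content type %q\n", meta.Name, meta.ContentType)

	// Download returns the raw media as an *http.Response whose body the
	// caller must drain and close.
	res, err := svc.Objects.Get(bucket, object).Context(ctx).Download()
	if err != nil {
		return err
	}
	defer res.Body.Close()
	_, err = io.Copy(io.Discard, res.Body)
	return err
}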
+func (c *ObjectsGetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/iam") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ObjectsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns an IAM policy for the specified object.", + // "httpMethod": "GET", + // "id": "storage.objects.getIamPolicy", + // "parameterOrder": [ + // "bucket", + // "object" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which the object resides.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}/iam", + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.objects.insert": + +type ObjectsInsertCall struct { + s *Service + bucket string + object *Object + urlParams_ gensupport.URLParams + mediaInfo_ *gensupport.MediaInfo + retry *gensupport.RetryConfig + ctx_ context.Context + header_ http.Header +} + +// Insert: Stores a new object and metadata. +// +// - bucket: Name of the bucket in which to store the new object. +// Overrides the provided object metadata's bucket value, if any. +func (r *ObjectsService) Insert(bucket string, object *Object) *ObjectsInsertCall { + c := &ObjectsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + return c +} + +// ContentEncoding sets the optional parameter "contentEncoding": If +// set, sets the contentEncoding property of the final object to this +// value. Setting this parameter is equivalent to setting the +// contentEncoding metadata property. This can be useful when uploading +// an object with uploadType=media to indicate the encoding of the +// content being uploaded. +func (c *ObjectsInsertCall) ContentEncoding(contentEncoding string) *ObjectsInsertCall { + c.urlParams_.Set("contentEncoding", contentEncoding) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the object's current +// generation matches the given value. Setting to 0 makes the operation +// succeed only if there are no live versions of the object. +func (c *ObjectsInsertCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsInsertCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfGenerationNotMatch sets the optional parameter +// "ifGenerationNotMatch": Makes the operation conditional on whether +// the object's current generation does not match the given value. If no +// live object exists, the precondition fails. Setting to 0 makes the +// operation succeed only if there is a live version of the object. +func (c *ObjectsInsertCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsInsertCall { + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the object's current metageneration matches the given value. +func (c *ObjectsInsertCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsInsertCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on +// whether the object's current metageneration does not match the given +// value. 
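// --- Editorial sketch (not part of this patch or the vendored, generated
// file): a hypothetical example of reading an object's IAM policy with
// storage.objects.getIamPolicy. Assumes svc, bucket and object are supplied
// by the caller, and that the returned *Policy exposes its bindings via a
// Bindings slice (as in other generated Google API clients).
func exampleObjectsGetIamPolicy(ctx context.Context, svc *Service, bucket, object string) error {
	policy, err := svc.Objects.GetIamPolicy(bucket, object).Context(ctx).Do()
	if err != nil {
		return err
	}
	fmt.Printf("object %s/%s has %d IAM bindings\n", bucket, object, len(policy.Bindings))
	return nil
}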
+func (c *ObjectsInsertCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsInsertCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// KmsKeyName sets the optional parameter "kmsKeyName": Resource name of +// the Cloud KMS key, of the form +// projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, +// +// that will be used to encrypt the object. Overrides the object +// +// metadata's kms_key_name value, if any. +func (c *ObjectsInsertCall) KmsKeyName(kmsKeyName string) *ObjectsInsertCall { + c.urlParams_.Set("kmsKeyName", kmsKeyName) + return c +} + +// Name sets the optional parameter "name": Name of the object. Required +// when the object metadata is not otherwise provided. Overrides the +// object metadata's name value, if any. For information about how to +// URL encode object names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). +func (c *ObjectsInsertCall) Name(name string) *ObjectsInsertCall { + c.urlParams_.Set("name", name) + return c +} + +// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a +// predefined set of access controls to this object. +// +// Possible values: +// +// "authenticatedRead" - Object owner gets OWNER access, and +// +// allAuthenticatedUsers get READER access. +// +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// +// project team owners get OWNER access. +// +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// +// team owners get READER access. +// +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// +// members get access according to their roles. +// +// "publicRead" - Object owner gets OWNER access, and allUsers get +// +// READER access. +func (c *ObjectsInsertCall) PredefinedAcl(predefinedAcl string) *ObjectsInsertCall { + c.urlParams_.Set("predefinedAcl", predefinedAcl) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to noAcl, unless the object resource +// specifies the acl property, when it defaults to full. +// +// Possible values: +// +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. +func (c *ObjectsInsertCall) Projection(projection string) *ObjectsInsertCall { + c.urlParams_.Set("projection", projection) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectsInsertCall) UserProject(userProject string) *ObjectsInsertCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Media specifies the media to upload in one or more chunks. The chunk +// size may be controlled by supplying a MediaOption generated by +// googleapi.ChunkSize. The chunk size defaults to +// googleapi.DefaultUploadChunkSize.The Content-Type header used in the +// upload request will be determined by sniffing the contents of r, +// unless a MediaOption generated by googleapi.ContentType is +// supplied. +// At most one of Media and ResumableMedia may be set. +func (c *ObjectsInsertCall) Media(r io.Reader, options ...googleapi.MediaOption) *ObjectsInsertCall { + if ct := c.object.ContentType; ct != "" { + options = append([]googleapi.MediaOption{googleapi.ContentType(ct)}, options...) 
+ } + c.mediaInfo_ = gensupport.NewInfoFromMedia(r, options) + return c +} + +// ResumableMedia specifies the media to upload in chunks and can be +// canceled with ctx. +// +// Deprecated: use Media instead. +// +// At most one of Media and ResumableMedia may be set. mediaType +// identifies the MIME media type of the upload, such as "image/png". If +// mediaType is "", it will be auto-detected. The provided ctx will +// supersede any context previously provided to the Context method. +func (c *ObjectsInsertCall) ResumableMedia(ctx context.Context, r io.ReaderAt, size int64, mediaType string) *ObjectsInsertCall { + c.ctx_ = ctx + c.mediaInfo_ = gensupport.NewInfoFromResumableMedia(r, size, mediaType) + return c +} + +// ProgressUpdater provides a callback function that will be called +// after every chunk. It should be a low-latency function in order to +// not slow down the upload operation. This should only be called when +// using ResumableMedia (as opposed to Media). +func (c *ObjectsInsertCall) ProgressUpdater(pu googleapi.ProgressUpdater) *ObjectsInsertCall { + c.mediaInfo_.SetProgressUpdater(pu) + return c +} + +// WithRetry causes the library to retry the initial request of the +// upload(for resumable uploads) or the entire upload (for multipart +// uploads) ifa transient error occurs. This is contingent on ChunkSize +// being > 0 (sothat the input data may be buffered). The backoff +// argument will be used todetermine exponential backoff timing, and the +// errorFunc is used to determinewhich errors are considered retryable. +// By default, exponetial backoff will beapplied using gax defaults, and +// the following errors are retried: +// +// - HTTP responses with codes 408, 429, 502, 503, and 504. +// +// - Transient network errors such as connection reset and +// io.ErrUnexpectedEOF. +// +// - Errors which are considered transient using the Temporary() +// interface. +// +// - Wrapped versions of these errors. +func (c *ObjectsInsertCall) WithRetry(bo *gax.Backoff, errorFunc func(err error) bool) *ObjectsInsertCall { + c.retry = &gensupport.RetryConfig{ + Backoff: bo, + ShouldRetry: errorFunc, + } + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsInsertCall) Fields(s ...googleapi.Field) *ObjectsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +// This context will supersede any context previously provided to the +// ResumableMedia method. +func (c *ObjectsInsertCall) Context(ctx context.Context) *ObjectsInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
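// --- Editorial sketch (not part of this patch or the vendored, generated
// file): a hypothetical example of storage.objects.insert with a media
// upload. Assumes svc and bucket are supplied by the caller; the object name
// and content type are illustrative only.
func exampleObjectsInsert(ctx context.Context, svc *Service, bucket string, r io.Reader) (*Object, error) {
	obj := &Object{Name: "example/key", ContentType: "application/octet-stream"}
	// Media attaches the payload. googleapi.ChunkSize controls the resumable
	// upload chunk size (the default is googleapi.DefaultUploadChunkSize);
	// per the WithRetry comment above, a nonzero chunk size is what allows
	// transient failures to be retried, because the data can be buffered.
	return svc.Objects.Insert(bucket, obj).
		Media(r, googleapi.ChunkSize(googleapi.DefaultUploadChunkSize)).
		Context(ctx).
		Do()
}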
+func (c *ObjectsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o") + if c.mediaInfo_ != nil { + urls = googleapi.ResolveRelative(c.s.BasePath, "/upload/storage/v1/b/{bucket}/o") + c.urlParams_.Set("uploadType", c.mediaInfo_.UploadType()) + } + if body == nil { + body = new(bytes.Buffer) + reqHeaders.Set("Content-Type", "application/json") + } + body, getBody, cleanup := c.mediaInfo_.UploadRequest(reqHeaders, body) + defer cleanup() + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + req.GetBody = getBody + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + if c.retry != nil { + return gensupport.SendRequestWithRetry(c.ctx_, c.s.client, req, c.retry) + } + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.insert" call. +// Exactly one of *Object or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Object.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ObjectsInsertCall) Do(opts ...googleapi.CallOption) (*Object, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + rx := c.mediaInfo_.ResumableUpload(res.Header.Get("Location")) + if rx != nil { + rx.Client = c.s.client + rx.UserAgent = c.s.userAgent() + rx.Retry = c.retry + ctx := c.ctx_ + if ctx == nil { + ctx = context.TODO() + } + res, err = rx.Upload(ctx) + if err != nil { + return nil, err + } + defer res.Body.Close() + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + } + ret := &Object{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Stores a new object and metadata.", + // "httpMethod": "POST", + // "id": "storage.objects.insert", + // "mediaUpload": { + // "accept": [ + // "*/*" + // ], + // "protocols": { + // "resumable": { + // "multipart": true, + // "path": "/resumable/upload/storage/v1/b/{bucket}/o" + // }, + // "simple": { + // "multipart": true, + // "path": "/upload/storage/v1/b/{bucket}/o" + // } + // } + // }, + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "contentEncoding": { + // "description": "If set, sets the contentEncoding property of the final object to this value. Setting this parameter is equivalent to setting the contentEncoding metadata property. This can be useful when uploading an object with uploadType=media to indicate the encoding of the content being uploaded.", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. 
Setting to 0 makes the operation succeed only if there is a live version of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "kmsKeyName": { + // "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", + // "location": "query", + // "type": "string" + // }, + // "name": { + // "description": "Name of the object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + // "location": "query", + // "type": "string" + // }, + // "predefinedAcl": { + // "description": "Apply a predefined set of access controls to this object.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the owner, acl property." + // ], + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o", + // "request": { + // "$ref": "Object" + // }, + // "response": { + // "$ref": "Object" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ], + // "supportsMediaUpload": true + // } + +} + +// method id "storage.objects.list": + +type ObjectsListCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a list of objects matching the criteria. +// +// - bucket: Name of the bucket in which to look for objects. 
+func (r *ObjectsService) List(bucket string) *ObjectsListCall { + c := &ObjectsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + return c +} + +// Delimiter sets the optional parameter "delimiter": Returns results in +// a directory-like mode. items will contain only objects whose names, +// aside from the prefix, do not contain delimiter. Objects whose names, +// aside from the prefix, contain delimiter will have their name, +// truncated after the delimiter, returned in prefixes. Duplicate +// prefixes are omitted. +func (c *ObjectsListCall) Delimiter(delimiter string) *ObjectsListCall { + c.urlParams_.Set("delimiter", delimiter) + return c +} + +// EndOffset sets the optional parameter "endOffset": Filter results to +// objects whose names are lexicographically before endOffset. If +// startOffset is also set, the objects listed will have names between +// startOffset (inclusive) and endOffset (exclusive). +func (c *ObjectsListCall) EndOffset(endOffset string) *ObjectsListCall { + c.urlParams_.Set("endOffset", endOffset) + return c +} + +// IncludeFoldersAsPrefixes sets the optional parameter +// "includeFoldersAsPrefixes": Only applicable if delimiter is set to +// '/'. If true, will also include folders and managed folders (besides +// objects) in the returned prefixes. +func (c *ObjectsListCall) IncludeFoldersAsPrefixes(includeFoldersAsPrefixes bool) *ObjectsListCall { + c.urlParams_.Set("includeFoldersAsPrefixes", fmt.Sprint(includeFoldersAsPrefixes)) + return c +} + +// IncludeTrailingDelimiter sets the optional parameter +// "includeTrailingDelimiter": If true, objects that end in exactly one +// instance of delimiter will have their metadata included in items in +// addition to prefixes. +func (c *ObjectsListCall) IncludeTrailingDelimiter(includeTrailingDelimiter bool) *ObjectsListCall { + c.urlParams_.Set("includeTrailingDelimiter", fmt.Sprint(includeTrailingDelimiter)) + return c +} + +// MatchGlob sets the optional parameter "matchGlob": Filter results to +// objects and prefixes that match this glob pattern. +func (c *ObjectsListCall) MatchGlob(matchGlob string) *ObjectsListCall { + c.urlParams_.Set("matchGlob", matchGlob) + return c +} + +// MaxResults sets the optional parameter "maxResults": Maximum number +// of items plus prefixes to return in a single page of responses. As +// duplicate prefixes are omitted, fewer total results may be returned +// than requested. The service will use this parameter or 1,000 items, +// whichever is smaller. +func (c *ObjectsListCall) MaxResults(maxResults int64) *ObjectsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": A +// previously-returned page token representing part of the larger set of +// results to view. +func (c *ObjectsListCall) PageToken(pageToken string) *ObjectsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Prefix sets the optional parameter "prefix": Filter results to +// objects whose names begin with this prefix. +func (c *ObjectsListCall) Prefix(prefix string) *ObjectsListCall { + c.urlParams_.Set("prefix", prefix) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to noAcl. +// +// Possible values: +// +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. 
+func (c *ObjectsListCall) Projection(projection string) *ObjectsListCall { + c.urlParams_.Set("projection", projection) + return c +} + +// SoftDeleted sets the optional parameter "softDeleted": If true, only +// soft-deleted object versions will be listed. The default is false. +// For more information, see Soft Delete. +func (c *ObjectsListCall) SoftDeleted(softDeleted bool) *ObjectsListCall { + c.urlParams_.Set("softDeleted", fmt.Sprint(softDeleted)) + return c +} + +// StartOffset sets the optional parameter "startOffset": Filter results +// to objects whose names are lexicographically equal to or after +// startOffset. If endOffset is also set, the objects listed will have +// names between startOffset (inclusive) and endOffset (exclusive). +func (c *ObjectsListCall) StartOffset(startOffset string) *ObjectsListCall { + c.urlParams_.Set("startOffset", startOffset) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectsListCall) UserProject(userProject string) *ObjectsListCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Versions sets the optional parameter "versions": If true, lists all +// versions of an object as distinct results. The default is false. For +// more information, see Object Versioning. +func (c *ObjectsListCall) Versions(versions bool) *ObjectsListCall { + c.urlParams_.Set("versions", fmt.Sprint(versions)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsListCall) Fields(s ...googleapi.Field) *ObjectsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ObjectsListCall) IfNoneMatch(entityTag string) *ObjectsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectsListCall) Context(ctx context.Context) *ObjectsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ObjectsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.list" call. +// Exactly one of *Objects or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Objects.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ObjectsListCall) Do(opts ...googleapi.CallOption) (*Objects, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Objects{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of objects matching the criteria.", + // "httpMethod": "GET", + // "id": "storage.objects.list", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which to look for objects.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "delimiter": { + // "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.", + // "location": "query", + // "type": "string" + // }, + // "endOffset": { + // "description": "Filter results to objects whose names are lexicographically before endOffset. If startOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", + // "location": "query", + // "type": "string" + // }, + // "includeFoldersAsPrefixes": { + // "description": "Only applicable if delimiter is set to '/'. If true, will also include folders and managed folders (besides objects) in the returned prefixes.", + // "location": "query", + // "type": "boolean" + // }, + // "includeTrailingDelimiter": { + // "description": "If true, objects that end in exactly one instance of delimiter will have their metadata included in items in addition to prefixes.", + // "location": "query", + // "type": "boolean" + // }, + // "matchGlob": { + // "description": "Filter results to objects and prefixes that match this glob pattern.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "1000", + // "description": "Maximum number of items plus prefixes to return in a single page of responses. As duplicate prefixes are omitted, fewer total results may be returned than requested. 
The service will use this parameter or 1,000 items, whichever is smaller.", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "A previously-returned page token representing part of the larger set of results to view.", + // "location": "query", + // "type": "string" + // }, + // "prefix": { + // "description": "Filter results to objects whose names begin with this prefix.", + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to noAcl.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the owner, acl property." + // ], + // "location": "query", + // "type": "string" + // }, + // "softDeleted": { + // "description": "If true, only soft-deleted object versions will be listed. The default is false. For more information, see Soft Delete.", + // "location": "query", + // "type": "boolean" + // }, + // "startOffset": { + // "description": "Filter results to objects whose names are lexicographically equal to or after startOffset. If endOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // }, + // "versions": { + // "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "b/{bucket}/o", + // "response": { + // "$ref": "Objects" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ], + // "supportsSubscription": true + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ObjectsListCall) Pages(ctx context.Context, f func(*Objects) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "storage.objects.patch": + +type ObjectsPatchCall struct { + s *Service + bucket string + object string + object2 *Object + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Patches an object's metadata. +// +// - bucket: Name of the bucket in which the object resides. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). 
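// --- Editorial sketch (not part of this patch or the vendored, generated
// file): a hypothetical example of paging through storage.objects.list
// results with Pages, using a prefix/delimiter pair for a directory-style
// listing. Assumes svc and bucket are supplied by the caller, and that the
// Objects response type exposes Items and Prefixes slices.
func exampleObjectsList(ctx context.Context, svc *Service, bucket, prefix string) error {
	call := svc.Objects.List(bucket).Prefix(prefix).Delimiter("/").MaxResults(100)
	// Pages follows NextPageToken internally and invokes the callback once
	// per page until the listing is exhausted or the callback returns an error.
	return call.Pages(ctx, func(page *Objects) error {
		for _, p := range page.Prefixes {
			fmt.Println("synthetic directory:", p)
		}
		for _, o := range page.Items {
			fmt.Println("object:", o.Name)
		}
		return nil
	})
}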
+func (r *ObjectsService) Patch(bucket string, object string, object2 *Object) *ObjectsPatchCall { + c := &ObjectsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.object2 = object2 + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectsPatchCall) Generation(generation int64) *ObjectsPatchCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the object's current +// generation matches the given value. Setting to 0 makes the operation +// succeed only if there are no live versions of the object. +func (c *ObjectsPatchCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsPatchCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfGenerationNotMatch sets the optional parameter +// "ifGenerationNotMatch": Makes the operation conditional on whether +// the object's current generation does not match the given value. If no +// live object exists, the precondition fails. Setting to 0 makes the +// operation succeed only if there is a live version of the object. +func (c *ObjectsPatchCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsPatchCall { + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the object's current metageneration matches the given value. +func (c *ObjectsPatchCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsPatchCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on +// whether the object's current metageneration does not match the given +// value. +func (c *ObjectsPatchCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsPatchCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// OverrideUnlockedRetention sets the optional parameter +// "overrideUnlockedRetention": Must be true to remove the retention +// configuration, reduce its unlocked retention period, or change its +// mode from unlocked to locked. +func (c *ObjectsPatchCall) OverrideUnlockedRetention(overrideUnlockedRetention bool) *ObjectsPatchCall { + c.urlParams_.Set("overrideUnlockedRetention", fmt.Sprint(overrideUnlockedRetention)) + return c +} + +// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a +// predefined set of access controls to this object. +// +// Possible values: +// +// "authenticatedRead" - Object owner gets OWNER access, and +// +// allAuthenticatedUsers get READER access. +// +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// +// project team owners get OWNER access. +// +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// +// team owners get READER access. +// +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// +// members get access according to their roles. +// +// "publicRead" - Object owner gets OWNER access, and allUsers get +// +// READER access. 
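// --- Editorial sketch (not part of this patch or the vendored, generated
// file): a hypothetical example of storage.objects.patch, updating a single
// metadata field while leaving the rest of the object unchanged. Assumes
// svc, bucket and object are supplied by the caller; an IfMetagenerationMatch
// precondition could be added to guard against concurrent metadata updates.
func exampleObjectsPatch(ctx context.Context, svc *Service, bucket, object string) (*Object, error) {
	// Only the fields set on the patch body are modified; here, Content-Type.
	update := &Object{ContentType: "text/plain"}
	return svc.Objects.Patch(bucket, object, update).Context(ctx).Do()
}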
+func (c *ObjectsPatchCall) PredefinedAcl(predefinedAcl string) *ObjectsPatchCall { + c.urlParams_.Set("predefinedAcl", predefinedAcl) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to full. +// +// Possible values: +// +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. +func (c *ObjectsPatchCall) Projection(projection string) *ObjectsPatchCall { + c.urlParams_.Set("projection", projection) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request, for Requester Pays buckets. +func (c *ObjectsPatchCall) UserProject(userProject string) *ObjectsPatchCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsPatchCall) Fields(s ...googleapi.Field) *ObjectsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectsPatchCall) Context(ctx context.Context) *ObjectsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ObjectsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.patch" call. +// Exactly one of *Object or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Object.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ObjectsPatchCall) Do(opts ...googleapi.CallOption) (*Object, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Object{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Patches an object's metadata.", + // "httpMethod": "PATCH", + // "id": "storage.objects.patch", + // "parameterOrder": [ + // "bucket", + // "object" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which the object resides.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "overrideUnlockedRetention": { + // "description": "Must be true to remove the retention configuration, reduce its unlocked retention period, or change its mode from unlocked to locked.", + // "location": "query", + // "type": "boolean" + // }, + // "predefinedAcl": { + // "description": "Apply a predefined set of access controls to this object.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the owner, acl property." + // ], + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request, for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}", + // "request": { + // "$ref": "Object" + // }, + // "response": { + // "$ref": "Object" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.objects.restore": + +type ObjectsRestoreCall struct { + s *Service + bucket string + object string + object2 *Object + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Restore: Restores a soft-deleted object. +// +// - bucket: Name of the bucket in which the object resides. +// - generation: Selects a specific revision of this object. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts. +func (r *ObjectsService) Restore(bucket string, object string, object2 *Object) *ObjectsRestoreCall { + c := &ObjectsRestoreCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.object2 = object2 + return c +} + +// CopySourceAcl sets the optional parameter "copySourceAcl": If true, +// copies the source object's ACL; otherwise, uses the bucket's default +// object ACL. The default is false. +func (c *ObjectsRestoreCall) CopySourceAcl(copySourceAcl bool) *ObjectsRestoreCall { + c.urlParams_.Set("copySourceAcl", fmt.Sprint(copySourceAcl)) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the object's one live +// generation matches the given value. Setting to 0 makes the operation +// succeed only if there are no live versions of the object. 
+func (c *ObjectsRestoreCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsRestoreCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfGenerationNotMatch sets the optional parameter +// "ifGenerationNotMatch": Makes the operation conditional on whether +// none of the object's live generations match the given value. If no +// live object exists, the precondition fails. Setting to 0 makes the +// operation succeed only if there is a live version of the object. +func (c *ObjectsRestoreCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsRestoreCall { + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the object's one live metageneration matches the given value. +func (c *ObjectsRestoreCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsRestoreCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on +// whether none of the object's live metagenerations match the given +// value. +func (c *ObjectsRestoreCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsRestoreCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to full. +// +// Possible values: +// +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. +func (c *ObjectsRestoreCall) Projection(projection string) *ObjectsRestoreCall { + c.urlParams_.Set("projection", projection) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectsRestoreCall) UserProject(userProject string) *ObjectsRestoreCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsRestoreCall) Fields(s ...googleapi.Field) *ObjectsRestoreCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectsRestoreCall) Context(ctx context.Context) *ObjectsRestoreCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ObjectsRestoreCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsRestoreCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/restore") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.restore" call. +// Exactly one of *Object or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Object.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ObjectsRestoreCall) Do(opts ...googleapi.CallOption) (*Object, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Object{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Restores a soft-deleted object.", + // "httpMethod": "POST", + // "id": "storage.objects.restore", + // "parameterOrder": [ + // "bucket", + // "object" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which the object resides.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "copySourceAcl": { + // "description": "If true, copies the source object's ACL; otherwise, uses the bucket's default object ACL. The default is false.", + // "location": "query", + // "type": "boolean" + // }, + // "generation": { + // "description": "Selects a specific revision of this object.", + // "format": "int64", + // "location": "query", + // "required": true, + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the object's one live generation matches the given value. 
Setting to 0 makes the operation succeed only if there are no live versions of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationNotMatch": { + // "description": "Makes the operation conditional on whether none of the object's live generations match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the object's one live metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether none of the object's live metagenerations match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the owner, acl property." + // ], + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}/restore", + // "request": { + // "$ref": "Object" + // }, + // "response": { + // "$ref": "Object" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.objects.rewrite": + +type ObjectsRewriteCall struct { + s *Service + sourceBucket string + sourceObject string + destinationBucket string + destinationObject string + object *Object + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Rewrite: Rewrites a source object to a destination object. Optionally +// overrides metadata. +// +// - destinationBucket: Name of the bucket in which to store the new +// object. Overrides the provided object metadata's bucket value, if +// any. +// - destinationObject: Name of the new object. Required when the object +// metadata is not otherwise provided. Overrides the object metadata's +// name value, if any. For information about how to URL encode object +// names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). +// - sourceBucket: Name of the bucket in which to find the source +// object. +// - sourceObject: Name of the source object. For information about how +// to URL encode object names to be path safe, see Encoding URI Path +// Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). 
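+
+// A minimal usage sketch (not generated code), assuming a configured *Service
+// named svc, a context.Context named ctx, and placeholder bucket/object names:
+// a rewrite that spans locations or storage classes may not finish in a single
+// request, so the caller loops, passing each response's RewriteToken into the
+// next request until the response reports Done.
+//
+//	call := svc.Objects.Rewrite("src-bucket", "big.bin", "dst-bucket", "big.bin", &Object{})
+//	for {
+//		resp, err := call.Context(ctx).Do()
+//		if err != nil {
+//			// handle the error
+//			break
+//		}
+//		if resp.Done {
+//			break
+//		}
+//		call.RewriteToken(resp.RewriteToken)
+//	}
+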
+func (r *ObjectsService) Rewrite(sourceBucket string, sourceObject string, destinationBucket string, destinationObject string, object *Object) *ObjectsRewriteCall { + c := &ObjectsRewriteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.sourceBucket = sourceBucket + c.sourceObject = sourceObject + c.destinationBucket = destinationBucket + c.destinationObject = destinationObject + c.object = object + return c +} + +// DestinationKmsKeyName sets the optional parameter +// "destinationKmsKeyName": Resource name of the Cloud KMS key, of the +// form +// projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, +// +// that will be used to encrypt the object. Overrides the object +// +// metadata's kms_key_name value, if any. +func (c *ObjectsRewriteCall) DestinationKmsKeyName(destinationKmsKeyName string) *ObjectsRewriteCall { + c.urlParams_.Set("destinationKmsKeyName", destinationKmsKeyName) + return c +} + +// DestinationPredefinedAcl sets the optional parameter +// "destinationPredefinedAcl": Apply a predefined set of access controls +// to the destination object. +// +// Possible values: +// +// "authenticatedRead" - Object owner gets OWNER access, and +// +// allAuthenticatedUsers get READER access. +// +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// +// project team owners get OWNER access. +// +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// +// team owners get READER access. +// +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// +// members get access according to their roles. +// +// "publicRead" - Object owner gets OWNER access, and allUsers get +// +// READER access. +func (c *ObjectsRewriteCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsRewriteCall { + c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the object's current +// generation matches the given value. Setting to 0 makes the operation +// succeed only if there are no live versions of the object. +func (c *ObjectsRewriteCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfGenerationNotMatch sets the optional parameter +// "ifGenerationNotMatch": Makes the operation conditional on whether +// the object's current generation does not match the given value. If no +// live object exists, the precondition fails. Setting to 0 makes the +// operation succeed only if there is a live version of the object. +func (c *ObjectsRewriteCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the destination object's current metageneration matches the given +// value. +func (c *ObjectsRewriteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on +// whether the destination object's current metageneration does not +// match the given value. 
+func (c *ObjectsRewriteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// IfSourceGenerationMatch sets the optional parameter +// "ifSourceGenerationMatch": Makes the operation conditional on whether +// the source object's current generation matches the given value. +func (c *ObjectsRewriteCall) IfSourceGenerationMatch(ifSourceGenerationMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifSourceGenerationMatch", fmt.Sprint(ifSourceGenerationMatch)) + return c +} + +// IfSourceGenerationNotMatch sets the optional parameter +// "ifSourceGenerationNotMatch": Makes the operation conditional on +// whether the source object's current generation does not match the +// given value. +func (c *ObjectsRewriteCall) IfSourceGenerationNotMatch(ifSourceGenerationNotMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifSourceGenerationNotMatch", fmt.Sprint(ifSourceGenerationNotMatch)) + return c +} + +// IfSourceMetagenerationMatch sets the optional parameter +// "ifSourceMetagenerationMatch": Makes the operation conditional on +// whether the source object's current metageneration matches the given +// value. +func (c *ObjectsRewriteCall) IfSourceMetagenerationMatch(ifSourceMetagenerationMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifSourceMetagenerationMatch", fmt.Sprint(ifSourceMetagenerationMatch)) + return c +} + +// IfSourceMetagenerationNotMatch sets the optional parameter +// "ifSourceMetagenerationNotMatch": Makes the operation conditional on +// whether the source object's current metageneration does not match the +// given value. +func (c *ObjectsRewriteCall) IfSourceMetagenerationNotMatch(ifSourceMetagenerationNotMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifSourceMetagenerationNotMatch", fmt.Sprint(ifSourceMetagenerationNotMatch)) + return c +} + +// MaxBytesRewrittenPerCall sets the optional parameter +// "maxBytesRewrittenPerCall": The maximum number of bytes that will be +// rewritten per rewrite request. Most callers shouldn't need to specify +// this parameter - it is primarily in place to support testing. If // specified the value must be an integral multiple of 1 MiB (1048576). // Also, this only applies to requests where the source and destination // span locations and/or storage classes. Finally, this value must not @@ -11001,41 +14374,519 @@ func (c *ObjectsRewriteCall) MaxBytesRewrittenPerCall(maxBytesRewrittenPerCall i return c } -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to noAcl, unless the object resource -// specifies the acl property, when it defaults to full. -// -// Possible values: -// -// "full" - Include all properties. -// "noAcl" - Omit the owner, acl property. -func (c *ObjectsRewriteCall) Projection(projection string) *ObjectsRewriteCall { - c.urlParams_.Set("projection", projection) - return c +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to noAcl, unless the object resource +// specifies the acl property, when it defaults to full. +// +// Possible values: +// +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. 
+func (c *ObjectsRewriteCall) Projection(projection string) *ObjectsRewriteCall { + c.urlParams_.Set("projection", projection) + return c +} + +// RewriteToken sets the optional parameter "rewriteToken": Include this +// field (from the previous rewrite response) on each rewrite request +// after the first one, until the rewrite response 'done' flag is true. +// Calls that provide a rewriteToken can omit all other request fields, +// but if included those fields must match the values provided in the +// first rewrite request. +func (c *ObjectsRewriteCall) RewriteToken(rewriteToken string) *ObjectsRewriteCall { + c.urlParams_.Set("rewriteToken", rewriteToken) + return c +} + +// SourceGeneration sets the optional parameter "sourceGeneration": If +// present, selects a specific revision of the source object (as opposed +// to the latest version, the default). +func (c *ObjectsRewriteCall) SourceGeneration(sourceGeneration int64) *ObjectsRewriteCall { + c.urlParams_.Set("sourceGeneration", fmt.Sprint(sourceGeneration)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectsRewriteCall) UserProject(userProject string) *ObjectsRewriteCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsRewriteCall) Fields(s ...googleapi.Field) *ObjectsRewriteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectsRewriteCall) Context(ctx context.Context) *ObjectsRewriteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ObjectsRewriteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsRewriteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "sourceBucket": c.sourceBucket, + "sourceObject": c.sourceObject, + "destinationBucket": c.destinationBucket, + "destinationObject": c.destinationObject, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.rewrite" call. +// Exactly one of *RewriteResponse or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *RewriteResponse.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ObjectsRewriteCall) Do(opts ...googleapi.CallOption) (*RewriteResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &RewriteResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Rewrites a source object to a destination object. Optionally overrides metadata.", + // "httpMethod": "POST", + // "id": "storage.objects.rewrite", + // "parameterOrder": [ + // "sourceBucket", + // "sourceObject", + // "destinationBucket", + // "destinationObject" + // ], + // "parameters": { + // "destinationBucket": { + // "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "destinationKmsKeyName": { + // "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", + // "location": "query", + // "type": "string" + // }, + // "destinationObject": { + // "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "destinationPredefinedAcl": { + // "description": "Apply a predefined set of access controls to the destination object.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current generation matches the given value. 
Setting to 0 makes the operation succeed only if there are no live versions of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifSourceGenerationMatch": { + // "description": "Makes the operation conditional on whether the source object's current generation matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifSourceGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the source object's current generation does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifSourceMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifSourceMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "maxBytesRewrittenPerCall": { + // "description": "The maximum number of bytes that will be rewritten per rewrite request. Most callers shouldn't need to specify this parameter - it is primarily in place to support testing. If specified the value must be an integral multiple of 1 MiB (1048576). Also, this only applies to requests where the source and destination span locations and/or storage classes. Finally, this value must not change across rewrite calls else you'll get an error that the rewriteToken is invalid.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the owner, acl property." + // ], + // "location": "query", + // "type": "string" + // }, + // "rewriteToken": { + // "description": "Include this field (from the previous rewrite response) on each rewrite request after the first one, until the rewrite response 'done' flag is true. 
Calls that provide a rewriteToken can omit all other request fields, but if included those fields must match the values provided in the first rewrite request.", + // "location": "query", + // "type": "string" + // }, + // "sourceBucket": { + // "description": "Name of the bucket in which to find the source object.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "sourceGeneration": { + // "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "sourceObject": { + // "description": "Name of the source object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}", + // "request": { + // "$ref": "Object" + // }, + // "response": { + // "$ref": "RewriteResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.objects.setIamPolicy": + +type ObjectsSetIamPolicyCall struct { + s *Service + bucket string + object string + policy *Policy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetIamPolicy: Updates an IAM policy for the specified object. +// +// - bucket: Name of the bucket in which the object resides. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). +func (r *ObjectsService) SetIamPolicy(bucket string, object string, policy *Policy) *ObjectsSetIamPolicyCall { + c := &ObjectsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.policy = policy + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectsSetIamPolicyCall) Generation(generation int64) *ObjectsSetIamPolicyCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectsSetIamPolicyCall) UserProject(userProject string) *ObjectsSetIamPolicyCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsSetIamPolicyCall) Fields(s ...googleapi.Field) *ObjectsSetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
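+
+// A minimal usage sketch (not generated code), assuming a configured *Service
+// named svc, a context.Context named ctx, placeholder bucket/object names, and
+// the package's Objects.GetIamPolicy call (assumed available): SetIamPolicy
+// replaces the object's policy, so the usual pattern is read-modify-write.
+//
+//	policy, err := svc.Objects.GetIamPolicy("my-bucket", "logs/app.log").Context(ctx).Do()
+//	if err != nil {
+//		// handle the error
+//	}
+//	// ... adjust policy.Bindings as needed ...
+//	if _, err := svc.Objects.SetIamPolicy("my-bucket", "logs/app.log", policy).Context(ctx).Do(); err != nil {
+//		// handle the error
+//	}
+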
+func (c *ObjectsSetIamPolicyCall) Context(ctx context.Context) *ObjectsSetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ObjectsSetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/iam") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PUT", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ObjectsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an IAM policy for the specified object.", + // "httpMethod": "PUT", + // "id": "storage.objects.setIamPolicy", + // "parameterOrder": [ + // "bucket", + // "object" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which the object resides.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}/iam", + // "request": { + // "$ref": "Policy" + // }, + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.objects.testIamPermissions": + +type ObjectsTestIamPermissionsCall struct { + s *Service + bucket string + object string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// RewriteToken sets the optional parameter "rewriteToken": Include this -// field (from the previous rewrite response) on each rewrite request -// after the first one, until the rewrite response 'done' flag is true. -// Calls that provide a rewriteToken can omit all other request fields, -// but if included those fields must match the values provided in the -// first rewrite request. -func (c *ObjectsRewriteCall) RewriteToken(rewriteToken string) *ObjectsRewriteCall { - c.urlParams_.Set("rewriteToken", rewriteToken) +// TestIamPermissions: Tests a set of permissions on the given object to +// see which, if any, are held by the caller. +// +// - bucket: Name of the bucket in which the object resides. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). +// - permissions: Permissions to test. +func (r *ObjectsService) TestIamPermissions(bucket string, object string, permissions []string) *ObjectsTestIamPermissionsCall { + c := &ObjectsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.urlParams_.SetMulti("permissions", append([]string{}, permissions...)) return c } -// SourceGeneration sets the optional parameter "sourceGeneration": If -// present, selects a specific revision of the source object (as opposed -// to the latest version, the default). -func (c *ObjectsRewriteCall) SourceGeneration(sourceGeneration int64) *ObjectsRewriteCall { - c.urlParams_.Set("sourceGeneration", fmt.Sprint(sourceGeneration)) +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectsTestIamPermissionsCall) Generation(generation int64) *ObjectsTestIamPermissionsCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) return c } // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. 
-func (c *ObjectsRewriteCall) UserProject(userProject string) *ObjectsRewriteCall { +func (c *ObjectsTestIamPermissionsCall) UserProject(userProject string) *ObjectsTestIamPermissionsCall { c.urlParams_.Set("userProject", userProject) return c } @@ -11043,67 +14894,73 @@ func (c *ObjectsRewriteCall) UserProject(userProject string) *ObjectsRewriteCall // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ObjectsRewriteCall) Fields(s ...googleapi.Field) *ObjectsRewriteCall { +func (c *ObjectsTestIamPermissionsCall) Fields(s ...googleapi.Field) *ObjectsTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ObjectsTestIamPermissionsCall) IfNoneMatch(entityTag string) *ObjectsTestIamPermissionsCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ObjectsRewriteCall) Context(ctx context.Context) *ObjectsRewriteCall { +func (c *ObjectsTestIamPermissionsCall) Context(ctx context.Context) *ObjectsTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ObjectsRewriteCall) Header() http.Header { +func (c *ObjectsTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ObjectsRewriteCall) doRequest(alt string) (*http.Response, error) { +func (c *ObjectsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/iam/testPermissions") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "sourceBucket": c.sourceBucket, - "sourceObject": c.sourceObject, - "destinationBucket": c.destinationBucket, - "destinationObject": c.destinationObject, + "bucket": c.bucket, + "object": c.object, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.objects.rewrite" call. -// Exactly one of *RewriteResponse or error will be non-nil. Any non-2xx -// status code is an error. 
Response headers are in either -// *RewriteResponse.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use +// Do executes the "storage.objects.testIamPermissions" call. +// Exactly one of *TestIamPermissionsResponse or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *TestIamPermissionsResponse.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *ObjectsRewriteCall) Do(opts ...googleapi.CallOption) (*RewriteResponse, error) { +func (c *ObjectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -11122,7 +14979,7 @@ func (c *ObjectsRewriteCall) Do(opts ...googleapi.CallOption) (*RewriteResponse, if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &RewriteResponse{ + ret := &TestIamPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -11134,141 +14991,37 @@ func (c *ObjectsRewriteCall) Do(opts ...googleapi.CallOption) (*RewriteResponse, } return ret, nil // { - // "description": "Rewrites a source object to a destination object. Optionally overrides metadata.", - // "httpMethod": "POST", - // "id": "storage.objects.rewrite", + // "description": "Tests a set of permissions on the given object to see which, if any, are held by the caller.", + // "httpMethod": "GET", + // "id": "storage.objects.testIamPermissions", // "parameterOrder": [ - // "sourceBucket", - // "sourceObject", - // "destinationBucket", - // "destinationObject" + // "bucket", + // "object", + // "permissions" // ], // "parameters": { - // "destinationBucket": { - // "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "destinationKmsKeyName": { - // "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", - // "location": "query", - // "type": "string" - // }, - // "destinationObject": { - // "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. 
For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + // "bucket": { + // "description": "Name of the bucket in which the object resides.", // "location": "path", // "required": true, // "type": "string" // }, - // "destinationPredefinedAcl": { - // "description": "Apply a predefined set of access controls to the destination object.", - // "enum": [ - // "authenticatedRead", - // "bucketOwnerFullControl", - // "bucketOwnerRead", - // "private", - // "projectPrivate", - // "publicRead" - // ], - // "enumDescriptions": [ - // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - // "Object owner gets OWNER access, and project team owners get OWNER access.", - // "Object owner gets OWNER access, and project team owners get READER access.", - // "Object owner gets OWNER access.", - // "Object owner gets OWNER access, and project team members get access according to their roles.", - // "Object owner gets OWNER access, and allUsers get READER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "ifGenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifSourceGenerationMatch": { - // "description": "Makes the operation conditional on whether the source object's current generation matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifSourceGenerationNotMatch": { - // "description": "Makes the operation conditional on whether the source object's current generation does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifSourceMetagenerationMatch": { - // "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifSourceMetagenerationNotMatch": { - // "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "maxBytesRewrittenPerCall": { - // "description": "The maximum number of bytes that will be rewritten per rewrite request. 
Most callers shouldn't need to specify this parameter - it is primarily in place to support testing. If specified the value must be an integral multiple of 1 MiB (1048576). Also, this only applies to requests where the source and destination span locations and/or storage classes. Finally, this value must not change across rewrite calls else you'll get an error that the rewriteToken is invalid.", + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", // "format": "int64", // "location": "query", // "type": "string" // }, - // "projection": { - // "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit the owner, acl property." - // ], - // "location": "query", - // "type": "string" - // }, - // "rewriteToken": { - // "description": "Include this field (from the previous rewrite response) on each rewrite request after the first one, until the rewrite response 'done' flag is true. Calls that provide a rewriteToken can omit all other request fields, but if included those fields must match the values provided in the first rewrite request.", - // "location": "query", - // "type": "string" - // }, - // "sourceBucket": { - // "description": "Name of the bucket in which to find the source object.", + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", // "location": "path", // "required": true, // "type": "string" // }, - // "sourceGeneration": { - // "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).", - // "format": "int64", + // "permissions": { + // "description": "Permissions to test.", // "location": "query", - // "type": "string" - // }, - // "sourceObject": { - // "description": "Name of the source object. 
For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", - // "location": "path", + // "repeated": true, // "required": true, // "type": "string" // }, @@ -11278,59 +15031,145 @@ func (c *ObjectsRewriteCall) Do(opts ...googleapi.CallOption) (*RewriteResponse, // "type": "string" // } // }, - // "path": "b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}", - // "request": { - // "$ref": "Object" - // }, + // "path": "b/{bucket}/o/{object}/iam/testPermissions", // "response": { - // "$ref": "RewriteResponse" + // "$ref": "TestIamPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } -// method id "storage.objects.setIamPolicy": +// method id "storage.objects.update": -type ObjectsSetIamPolicyCall struct { +type ObjectsUpdateCall struct { s *Service bucket string object string - policy *Policy + object2 *Object urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// SetIamPolicy: Updates an IAM policy for the specified object. +// Update: Updates an object's metadata. // // - bucket: Name of the bucket in which the object resides. // - object: Name of the object. For information about how to URL encode // object names to be path safe, see Encoding URI Path Parts // (https://cloud.google.com/storage/docs/request-endpoints#encoding). -func (r *ObjectsService) SetIamPolicy(bucket string, object string, policy *Policy) *ObjectsSetIamPolicyCall { - c := &ObjectsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *ObjectsService) Update(bucket string, object string, object2 *Object) *ObjectsUpdateCall { + c := &ObjectsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket c.object = object - c.policy = policy + c.object2 = object2 return c } // Generation sets the optional parameter "generation": If present, // selects a specific revision of this object (as opposed to the latest // version, the default). -func (c *ObjectsSetIamPolicyCall) Generation(generation int64) *ObjectsSetIamPolicyCall { +func (c *ObjectsUpdateCall) Generation(generation int64) *ObjectsUpdateCall { c.urlParams_.Set("generation", fmt.Sprint(generation)) return c } +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the object's current +// generation matches the given value. Setting to 0 makes the operation +// succeed only if there are no live versions of the object. +func (c *ObjectsUpdateCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsUpdateCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfGenerationNotMatch sets the optional parameter +// "ifGenerationNotMatch": Makes the operation conditional on whether +// the object's current generation does not match the given value. If no +// live object exists, the precondition fails. Setting to 0 makes the +// operation succeed only if there is a live version of the object. 
+func (c *ObjectsUpdateCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsUpdateCall { + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the object's current metageneration matches the given value. +func (c *ObjectsUpdateCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsUpdateCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on +// whether the object's current metageneration does not match the given +// value. +func (c *ObjectsUpdateCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsUpdateCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// OverrideUnlockedRetention sets the optional parameter +// "overrideUnlockedRetention": Must be true to remove the retention +// configuration, reduce its unlocked retention period, or change its +// mode from unlocked to locked. +func (c *ObjectsUpdateCall) OverrideUnlockedRetention(overrideUnlockedRetention bool) *ObjectsUpdateCall { + c.urlParams_.Set("overrideUnlockedRetention", fmt.Sprint(overrideUnlockedRetention)) + return c +} + +// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a +// predefined set of access controls to this object. +// +// Possible values: +// +// "authenticatedRead" - Object owner gets OWNER access, and +// +// allAuthenticatedUsers get READER access. +// +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// +// project team owners get OWNER access. +// +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// +// team owners get READER access. +// +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// +// members get access according to their roles. +// +// "publicRead" - Object owner gets OWNER access, and allUsers get +// +// READER access. +func (c *ObjectsUpdateCall) PredefinedAcl(predefinedAcl string) *ObjectsUpdateCall { + c.urlParams_.Set("predefinedAcl", predefinedAcl) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to full. +// +// Possible values: +// +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. +func (c *ObjectsUpdateCall) Projection(projection string) *ObjectsUpdateCall { + c.urlParams_.Set("projection", projection) + return c +} + // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. -func (c *ObjectsSetIamPolicyCall) UserProject(userProject string) *ObjectsSetIamPolicyCall { +func (c *ObjectsUpdateCall) UserProject(userProject string) *ObjectsUpdateCall { c.urlParams_.Set("userProject", userProject) return c } @@ -11338,7 +15177,7 @@ func (c *ObjectsSetIamPolicyCall) UserProject(userProject string) *ObjectsSetIam // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *ObjectsSetIamPolicyCall) Fields(s ...googleapi.Field) *ObjectsSetIamPolicyCall { +func (c *ObjectsUpdateCall) Fields(s ...googleapi.Field) *ObjectsUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -11346,21 +15185,21 @@ func (c *ObjectsSetIamPolicyCall) Fields(s ...googleapi.Field) *ObjectsSetIamPol // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ObjectsSetIamPolicyCall) Context(ctx context.Context) *ObjectsSetIamPolicyCall { +func (c *ObjectsUpdateCall) Context(ctx context.Context) *ObjectsUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ObjectsSetIamPolicyCall) Header() http.Header { +func (c *ObjectsUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ObjectsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *ObjectsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -11368,14 +15207,14 @@ func (c *ObjectsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/iam") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("PUT", urls, body) if err != nil { @@ -11389,14 +15228,14 @@ func (c *ObjectsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.objects.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// Do executes the "storage.objects.update" call. +// Exactly one of *Object or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) +// *Object.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. -func (c *ObjectsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +func (c *ObjectsUpdateCall) Do(opts ...googleapi.CallOption) (*Object, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -11415,7 +15254,7 @@ func (c *ObjectsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &Policy{ + ret := &Object{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -11427,9 +15266,9 @@ func (c *ObjectsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err } return ret, nil // { - // "description": "Updates an IAM policy for the specified object.", + // "description": "Updates an object's metadata.", // "httpMethod": "PUT", - // "id": "storage.objects.setIamPolicy", + // "id": "storage.objects.update", // "parameterOrder": [ // "bucket", // "object" @@ -11447,147 +15286,268 @@ func (c *ObjectsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err // "location": "query", // "type": "string" // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, // "object": { // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", // "location": "path", // "required": true, // "type": "string" // }, + // "overrideUnlockedRetention": { + // "description": "Must be true to remove the retention configuration, reduce its unlocked retention period, or change its mode from unlocked to locked.", + // "location": "query", + // "type": "boolean" + // }, + // "predefinedAcl": { + // "description": "Apply a predefined set of access controls to this object.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." 
+ // ], + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the owner, acl property." + // ], + // "location": "query", + // "type": "string" + // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", // "type": "string" // } // }, - // "path": "b/{bucket}/o/{object}/iam", + // "path": "b/{bucket}/o/{object}", // "request": { - // "$ref": "Policy" + // "$ref": "Object" // }, // "response": { - // "$ref": "Policy" + // "$ref": "Object" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" + // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } } -// method id "storage.objects.testIamPermissions": +// method id "storage.objects.watchAll": -type ObjectsTestIamPermissionsCall struct { - s *Service - bucket string - object string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type ObjectsWatchAllCall struct { + s *Service + bucket string + channel *Channel + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// WatchAll: Watch for changes on all objects in a bucket. +// +// - bucket: Name of the bucket in which to look for objects. +func (r *ObjectsService) WatchAll(bucket string, channel *Channel) *ObjectsWatchAllCall { + c := &ObjectsWatchAllCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.channel = channel + return c +} + +// Delimiter sets the optional parameter "delimiter": Returns results in +// a directory-like mode. items will contain only objects whose names, +// aside from the prefix, do not contain delimiter. Objects whose names, +// aside from the prefix, contain delimiter will have their name, +// truncated after the delimiter, returned in prefixes. Duplicate +// prefixes are omitted. +func (c *ObjectsWatchAllCall) Delimiter(delimiter string) *ObjectsWatchAllCall { + c.urlParams_.Set("delimiter", delimiter) + return c +} + +// EndOffset sets the optional parameter "endOffset": Filter results to +// objects whose names are lexicographically before endOffset. If +// startOffset is also set, the objects listed will have names between +// startOffset (inclusive) and endOffset (exclusive). +func (c *ObjectsWatchAllCall) EndOffset(endOffset string) *ObjectsWatchAllCall { + c.urlParams_.Set("endOffset", endOffset) + return c +} + +// IncludeTrailingDelimiter sets the optional parameter +// "includeTrailingDelimiter": If true, objects that end in exactly one +// instance of delimiter will have their metadata included in items in +// addition to prefixes. +func (c *ObjectsWatchAllCall) IncludeTrailingDelimiter(includeTrailingDelimiter bool) *ObjectsWatchAllCall { + c.urlParams_.Set("includeTrailingDelimiter", fmt.Sprint(includeTrailingDelimiter)) + return c +} + +// MaxResults sets the optional parameter "maxResults": Maximum number +// of items plus prefixes to return in a single page of responses. As +// duplicate prefixes are omitted, fewer total results may be returned +// than requested. The service will use this parameter or 1,000 items, +// whichever is smaller. 
+func (c *ObjectsWatchAllCall) MaxResults(maxResults int64) *ObjectsWatchAllCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": A +// previously-returned page token representing part of the larger set of +// results to view. +func (c *ObjectsWatchAllCall) PageToken(pageToken string) *ObjectsWatchAllCall { + c.urlParams_.Set("pageToken", pageToken) + return c } -// TestIamPermissions: Tests a set of permissions on the given object to -// see which, if any, are held by the caller. +// Prefix sets the optional parameter "prefix": Filter results to +// objects whose names begin with this prefix. +func (c *ObjectsWatchAllCall) Prefix(prefix string) *ObjectsWatchAllCall { + c.urlParams_.Set("prefix", prefix) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to noAcl. // -// - bucket: Name of the bucket in which the object resides. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts -// (https://cloud.google.com/storage/docs/request-endpoints#encoding). -// - permissions: Permissions to test. -func (r *ObjectsService) TestIamPermissions(bucket string, object string, permissions []string) *ObjectsTestIamPermissionsCall { - c := &ObjectsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.object = object - c.urlParams_.SetMulti("permissions", append([]string{}, permissions...)) +// Possible values: +// +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. +func (c *ObjectsWatchAllCall) Projection(projection string) *ObjectsWatchAllCall { + c.urlParams_.Set("projection", projection) return c } -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). -func (c *ObjectsTestIamPermissionsCall) Generation(generation int64) *ObjectsTestIamPermissionsCall { - c.urlParams_.Set("generation", fmt.Sprint(generation)) +// StartOffset sets the optional parameter "startOffset": Filter results +// to objects whose names are lexicographically equal to or after +// startOffset. If endOffset is also set, the objects listed will have +// names between startOffset (inclusive) and endOffset (exclusive). +func (c *ObjectsWatchAllCall) StartOffset(startOffset string) *ObjectsWatchAllCall { + c.urlParams_.Set("startOffset", startOffset) return c } // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. -func (c *ObjectsTestIamPermissionsCall) UserProject(userProject string) *ObjectsTestIamPermissionsCall { +func (c *ObjectsWatchAllCall) UserProject(userProject string) *ObjectsWatchAllCall { c.urlParams_.Set("userProject", userProject) return c } +// Versions sets the optional parameter "versions": If true, lists all +// versions of an object as distinct results. The default is false. For +// more information, see Object Versioning. +func (c *ObjectsWatchAllCall) Versions(versions bool) *ObjectsWatchAllCall { + c.urlParams_.Set("versions", fmt.Sprint(versions)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *ObjectsTestIamPermissionsCall) Fields(s ...googleapi.Field) *ObjectsTestIamPermissionsCall { +func (c *ObjectsWatchAllCall) Fields(s ...googleapi.Field) *ObjectsWatchAllCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ObjectsTestIamPermissionsCall) IfNoneMatch(entityTag string) *ObjectsTestIamPermissionsCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ObjectsTestIamPermissionsCall) Context(ctx context.Context) *ObjectsTestIamPermissionsCall { +func (c *ObjectsWatchAllCall) Context(ctx context.Context) *ObjectsWatchAllCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ObjectsTestIamPermissionsCall) Header() http.Header { +func (c *ObjectsWatchAllCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ObjectsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *ObjectsWatchAllCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/iam/testPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/watch") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, - "object": c.object, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.objects.testIamPermissions" call. -// Exactly one of *TestIamPermissionsResponse or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *TestIamPermissionsResponse.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ObjectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) { +// Do executes the "storage.objects.watchAll" call. +// Exactly one of *Channel or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Channel.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ObjectsWatchAllCall) Do(opts ...googleapi.CallOption) (*Channel, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -11606,7 +15566,7 @@ func (c *ObjectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestI if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &TestIamPermissionsResponse{ + ret := &Channel{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -11618,49 +15578,88 @@ func (c *ObjectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestI } return ret, nil // { - // "description": "Tests a set of permissions on the given object to see which, if any, are held by the caller.", - // "httpMethod": "GET", - // "id": "storage.objects.testIamPermissions", + // "description": "Watch for changes on all objects in a bucket.", + // "httpMethod": "POST", + // "id": "storage.objects.watchAll", // "parameterOrder": [ - // "bucket", - // "object", - // "permissions" + // "bucket" // ], // "parameters": { // "bucket": { - // "description": "Name of the bucket in which the object resides.", + // "description": "Name of the bucket in which to look for objects.", // "location": "path", // "required": true, // "type": "string" // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", + // "delimiter": { + // "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.", // "location": "query", // "type": "string" // }, - // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", - // "location": "path", - // "required": true, + // "endOffset": { + // "description": "Filter results to objects whose names are lexicographically before endOffset. If startOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", + // "location": "query", // "type": "string" // }, - // "permissions": { - // "description": "Permissions to test.", + // "includeTrailingDelimiter": { + // "description": "If true, objects that end in exactly one instance of delimiter will have their metadata included in items in addition to prefixes.", + // "location": "query", + // "type": "boolean" + // }, + // "maxResults": { + // "default": "1000", + // "description": "Maximum number of items plus prefixes to return in a single page of responses. As duplicate prefixes are omitted, fewer total results may be returned than requested. 
The service will use this parameter or 1,000 items, whichever is smaller.", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "A previously-returned page token representing part of the larger set of results to view.", + // "location": "query", + // "type": "string" + // }, + // "prefix": { + // "description": "Filter results to objects whose names begin with this prefix.", + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to noAcl.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the owner, acl property." + // ], + // "location": "query", + // "type": "string" + // }, + // "startOffset": { + // "description": "Filter results to objects whose names are lexicographically equal to or after startOffset. If endOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", // "location": "query", - // "repeated": true, - // "required": true, // "type": "string" // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", // "type": "string" + // }, + // "versions": { + // "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.", + // "location": "query", + // "type": "boolean" // } // }, - // "path": "b/{bucket}/o/{object}/iam/testPermissions", + // "path": "b/{bucket}/o/watch", + // "request": { + // "$ref": "Channel", + // "parameterName": "resource" + // }, // "response": { - // "$ref": "TestIamPermissionsResponse" + // "$ref": "Channel" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -11668,134 +15667,40 @@ func (c *ObjectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestI // "https://www.googleapis.com/auth/devstorage.full_control", // "https://www.googleapis.com/auth/devstorage.read_only", // "https://www.googleapis.com/auth/devstorage.read_write" - // ] + // ], + // "supportsSubscription": true // } } -// method id "storage.objects.update": - -type ObjectsUpdateCall struct { - s *Service - bucket string - object string - object2 *Object - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Update: Updates an object's metadata. -// -// - bucket: Name of the bucket in which the object resides. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts -// (https://cloud.google.com/storage/docs/request-endpoints#encoding). -func (r *ObjectsService) Update(bucket string, object string, object2 *Object) *ObjectsUpdateCall { - c := &ObjectsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.object = object - c.object2 = object2 - return c -} - -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). -func (c *ObjectsUpdateCall) Generation(generation int64) *ObjectsUpdateCall { - c.urlParams_.Set("generation", fmt.Sprint(generation)) - return c -} - -// IfGenerationMatch sets the optional parameter "ifGenerationMatch": -// Makes the operation conditional on whether the object's current -// generation matches the given value. 
Setting to 0 makes the operation -// succeed only if there are no live versions of the object. -func (c *ObjectsUpdateCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsUpdateCall { - c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) - return c -} - -// IfGenerationNotMatch sets the optional parameter -// "ifGenerationNotMatch": Makes the operation conditional on whether -// the object's current generation does not match the given value. If no -// live object exists, the precondition fails. Setting to 0 makes the -// operation succeed only if there is a live version of the object. -func (c *ObjectsUpdateCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsUpdateCall { - c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) - return c -} - -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the operation conditional on whether -// the object's current metageneration matches the given value. -func (c *ObjectsUpdateCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsUpdateCall { - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) - return c -} - -// IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": Makes the operation conditional on -// whether the object's current metageneration does not match the given -// value. -func (c *ObjectsUpdateCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsUpdateCall { - c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) - return c -} - -// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a -// predefined set of access controls to this object. -// -// Possible values: -// -// "authenticatedRead" - Object owner gets OWNER access, and -// -// allAuthenticatedUsers get READER access. -// -// "bucketOwnerFullControl" - Object owner gets OWNER access, and -// -// project team owners get OWNER access. -// -// "bucketOwnerRead" - Object owner gets OWNER access, and project -// -// team owners get READER access. -// -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team -// -// members get access according to their roles. -// -// "publicRead" - Object owner gets OWNER access, and allUsers get -// -// READER access. -func (c *ObjectsUpdateCall) PredefinedAcl(predefinedAcl string) *ObjectsUpdateCall { - c.urlParams_.Set("predefinedAcl", predefinedAcl) - return c -} - -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to full. -// -// Possible values: -// -// "full" - Include all properties. -// "noAcl" - Omit the owner, acl property. -func (c *ObjectsUpdateCall) Projection(projection string) *ObjectsUpdateCall { - c.urlParams_.Set("projection", projection) - return c +// method id "storage.buckets.operations.cancel": + +type OperationsCancelCall struct { + s *Service + bucket string + operationId string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *ObjectsUpdateCall) UserProject(userProject string) *ObjectsUpdateCall { - c.urlParams_.Set("userProject", userProject) +// Cancel: Starts asynchronous cancellation on a long-running operation. +// The server makes a best effort to cancel the operation, but success +// is not guaranteed. 
+// +// - bucket: The parent bucket of the operation resource. +// - operationId: The ID of the operation resource. +func (r *OperationsService) Cancel(bucket string, operationId string) *OperationsCancelCall { + c := &OperationsCancelCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.operationId = operationId return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ObjectsUpdateCall) Fields(s ...googleapi.Field) *ObjectsUpdateCall { +func (c *OperationsCancelCall) Fields(s ...googleapi.Field) *OperationsCancelCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -11803,21 +15708,21 @@ func (c *ObjectsUpdateCall) Fields(s ...googleapi.Field) *ObjectsUpdateCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ObjectsUpdateCall) Context(ctx context.Context) *ObjectsUpdateCall { +func (c *OperationsCancelCall) Context(ctx context.Context) *OperationsCancelCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ObjectsUpdateCall) Header() http.Header { +func (c *OperationsCancelCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ObjectsUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *OperationsCancelCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -11825,324 +15730,322 @@ func (c *ObjectsUpdateCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/operations/{operationId}/cancel") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "object": c.object, + "bucket": c.bucket, + "operationId": c.operationId, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.objects.update" call. -// Exactly one of *Object or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Object.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ObjectsUpdateCall) Do(opts ...googleapi.CallOption) (*Object, error) { +// Do executes the "storage.buckets.operations.cancel" call. +func (c *OperationsCancelCall) Do(opts ...googleapi.CallOption) error { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } if err != nil { - return nil, err + return err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &Object{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err + return gensupport.WrapError(err) } - return ret, nil + return nil // { - // "description": "Updates an object's metadata.", - // "httpMethod": "PUT", - // "id": "storage.objects.update", + // "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed.", + // "httpMethod": "POST", + // "id": "storage.buckets.operations.cancel", // "parameterOrder": [ // "bucket", - // "object" + // "operationId" // ], // "parameters": { // "bucket": { - // "description": "Name of the bucket in which the object resides.", + // "description": "The parent bucket of the operation resource.", // "location": "path", // "required": true, // "type": "string" // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + // "operationId": { + // "description": "The ID of the operation resource.", // "location": "path", // "required": true, // "type": "string" - // }, - // "predefinedAcl": { - // "description": "Apply a predefined set of access controls to this object.", - // "enum": [ - // "authenticatedRead", - // "bucketOwnerFullControl", - // "bucketOwnerRead", - // "private", - // "projectPrivate", - // "publicRead" - // ], - // "enumDescriptions": [ - // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - // "Object owner gets OWNER access, and project team owners get OWNER access.", - // "Object owner gets OWNER access, and project team owners get READER access.", - // "Object owner gets OWNER access.", - // "Object owner gets OWNER access, and project team members get access according to their roles.", - // "Object owner gets OWNER access, and allUsers get READER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. Defaults to full.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit the owner, acl property." - // ], - // "location": "query", + // } + // }, + // "path": "b/{bucket}/operations/{operationId}/cancel", + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.buckets.operations.get": + +type OperationsGetCall struct { + s *Service + bucket string + operationId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets the latest state of a long-running operation. +// +// - bucket: The parent bucket of the operation resource. +// - operationId: The ID of the operation resource. +func (r *OperationsService) Get(bucket string, operationId string) *OperationsGetCall { + c := &OperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.operationId = operationId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *OperationsGetCall) Fields(s ...googleapi.Field) *OperationsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *OperationsGetCall) IfNoneMatch(entityTag string) *OperationsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *OperationsGetCall) Context(ctx context.Context) *OperationsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *OperationsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OperationsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/operations/{operationId}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "operationId": c.operationId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.buckets.operations.get" call. +// Exactly one of *GoogleLongrunningOperation or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *GoogleLongrunningOperation.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *OperationsGetCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunningOperation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &GoogleLongrunningOperation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the latest state of a long-running operation.", + // "httpMethod": "GET", + // "id": "storage.buckets.operations.get", + // "parameterOrder": [ + // "bucket", + // "operationId" + // ], + // "parameters": { + // "bucket": { + // "description": "The parent bucket of the operation resource.", + // "location": "path", + // "required": true, // "type": "string" // }, - // "userProject": { - // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - // "location": "query", + // "operationId": { + // "description": "The ID of the operation resource.", + // "location": "path", + // "required": true, // "type": "string" // } // }, - // "path": "b/{bucket}/o/{object}", - // "request": { - // "$ref": "Object" - // }, + // "path": "b/{bucket}/operations/{operationId}", // "response": { - // "$ref": "Object" + // "$ref": "GoogleLongrunningOperation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } -// method id "storage.objects.watchAll": +// method id "storage.buckets.operations.list": -type ObjectsWatchAllCall struct { - s *Service - bucket string - channel *Channel - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type OperationsListCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// WatchAll: Watch for changes on all objects in a bucket. +// List: Lists operations that match the specified filter in the +// request. // -// - bucket: Name of the bucket in which to look for objects. -func (r *ObjectsService) WatchAll(bucket string, channel *Channel) *ObjectsWatchAllCall { - c := &ObjectsWatchAllCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - bucket: Name of the bucket in which to look for operations. +func (r *OperationsService) List(bucket string) *OperationsListCall { + c := &OperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket - c.channel = channel - return c -} - -// Delimiter sets the optional parameter "delimiter": Returns results in -// a directory-like mode. items will contain only objects whose names, -// aside from the prefix, do not contain delimiter. Objects whose names, -// aside from the prefix, contain delimiter will have their name, -// truncated after the delimiter, returned in prefixes. Duplicate -// prefixes are omitted. -func (c *ObjectsWatchAllCall) Delimiter(delimiter string) *ObjectsWatchAllCall { - c.urlParams_.Set("delimiter", delimiter) - return c -} - -// EndOffset sets the optional parameter "endOffset": Filter results to -// objects whose names are lexicographically before endOffset. If -// startOffset is also set, the objects listed will have names between -// startOffset (inclusive) and endOffset (exclusive). -func (c *ObjectsWatchAllCall) EndOffset(endOffset string) *ObjectsWatchAllCall { - c.urlParams_.Set("endOffset", endOffset) return c } -// IncludeTrailingDelimiter sets the optional parameter -// "includeTrailingDelimiter": If true, objects that end in exactly one -// instance of delimiter will have their metadata included in items in -// addition to prefixes. -func (c *ObjectsWatchAllCall) IncludeTrailingDelimiter(includeTrailingDelimiter bool) *ObjectsWatchAllCall { - c.urlParams_.Set("includeTrailingDelimiter", fmt.Sprint(includeTrailingDelimiter)) +// Filter sets the optional parameter "filter": A filter to narrow down +// results to a preferred subset. The filtering language is documented +// in more detail in AIP-160 (https://google.aip.dev/160). 
+func (c *OperationsListCall) Filter(filter string) *OperationsListCall { + c.urlParams_.Set("filter", filter) return c } -// MaxResults sets the optional parameter "maxResults": Maximum number -// of items plus prefixes to return in a single page of responses. As -// duplicate prefixes are omitted, fewer total results may be returned -// than requested. The service will use this parameter or 1,000 items, -// whichever is smaller. -func (c *ObjectsWatchAllCall) MaxResults(maxResults int64) *ObjectsWatchAllCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) +// PageSize sets the optional parameter "pageSize": Maximum number of +// items to return in a single page of responses. Fewer total results +// may be returned than requested. The service uses this parameter or +// 100 items, whichever is smaller. +func (c *OperationsListCall) PageSize(pageSize int64) *OperationsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } // PageToken sets the optional parameter "pageToken": A // previously-returned page token representing part of the larger set of // results to view. -func (c *ObjectsWatchAllCall) PageToken(pageToken string) *ObjectsWatchAllCall { +func (c *OperationsListCall) PageToken(pageToken string) *OperationsListCall { c.urlParams_.Set("pageToken", pageToken) return c } -// Prefix sets the optional parameter "prefix": Filter results to -// objects whose names begin with this prefix. -func (c *ObjectsWatchAllCall) Prefix(prefix string) *ObjectsWatchAllCall { - c.urlParams_.Set("prefix", prefix) - return c -} - -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to noAcl. -// -// Possible values: -// -// "full" - Include all properties. -// "noAcl" - Omit the owner, acl property. -func (c *ObjectsWatchAllCall) Projection(projection string) *ObjectsWatchAllCall { - c.urlParams_.Set("projection", projection) - return c -} - -// StartOffset sets the optional parameter "startOffset": Filter results -// to objects whose names are lexicographically equal to or after -// startOffset. If endOffset is also set, the objects listed will have -// names between startOffset (inclusive) and endOffset (exclusive). -func (c *ObjectsWatchAllCall) StartOffset(startOffset string) *ObjectsWatchAllCall { - c.urlParams_.Set("startOffset", startOffset) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *ObjectsWatchAllCall) UserProject(userProject string) *ObjectsWatchAllCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Versions sets the optional parameter "versions": If true, lists all -// versions of an object as distinct results. The default is false. For -// more information, see Object Versioning. -func (c *ObjectsWatchAllCall) Versions(versions bool) *ObjectsWatchAllCall { - c.urlParams_.Set("versions", fmt.Sprint(versions)) - return c -} - // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ObjectsWatchAllCall) Fields(s ...googleapi.Field) *ObjectsWatchAllCall { +func (c *OperationsListCall) Fields(s ...googleapi.Field) *OperationsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. 
This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *OperationsListCall) IfNoneMatch(entityTag string) *OperationsListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ObjectsWatchAllCall) Context(ctx context.Context) *ObjectsWatchAllCall { +func (c *OperationsListCall) Context(ctx context.Context) *OperationsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ObjectsWatchAllCall) Header() http.Header { +func (c *OperationsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ObjectsWatchAllCall) doRequest(alt string) (*http.Response, error) { +func (c *OperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/watch") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/operations") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } @@ -12153,14 +16056,15 @@ func (c *ObjectsWatchAllCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.objects.watchAll" call. -// Exactly one of *Channel or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Channel.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ObjectsWatchAllCall) Do(opts ...googleapi.CallOption) (*Channel, error) { +// Do executes the "storage.buckets.operations.list" call. +// Exactly one of *GoogleLongrunningListOperationsResponse or error will +// be non-nil. Any non-2xx status code is an error. Response headers are +// in either +// *GoogleLongrunningListOperationsResponse.ServerResponse.Header or (if +// a response was returned at all) in error.(*googleapi.Error).Header. +// Use googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *OperationsListCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunningListOperationsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -12179,7 +16083,7 @@ func (c *ObjectsWatchAllCall) Do(opts ...googleapi.CallOption) (*Channel, error) if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &Channel{ + ret := &GoogleLongrunningListOperationsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -12191,38 +16095,27 @@ func (c *ObjectsWatchAllCall) Do(opts ...googleapi.CallOption) (*Channel, error) } return ret, nil // { - // "description": "Watch for changes on all objects in a bucket.", - // "httpMethod": "POST", - // "id": "storage.objects.watchAll", + // "description": "Lists operations that match the specified filter in the request.", + // "httpMethod": "GET", + // "id": "storage.buckets.operations.list", // "parameterOrder": [ // "bucket" // ], // "parameters": { // "bucket": { - // "description": "Name of the bucket in which to look for objects.", + // "description": "Name of the bucket in which to look for operations.", // "location": "path", // "required": true, // "type": "string" // }, - // "delimiter": { - // "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.", - // "location": "query", - // "type": "string" - // }, - // "endOffset": { - // "description": "Filter results to objects whose names are lexicographically before endOffset. If startOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", + // "filter": { + // "description": "A filter to narrow down results to a preferred subset. The filtering language is documented in more detail in [AIP-160](https://google.aip.dev/160).", // "location": "query", // "type": "string" // }, - // "includeTrailingDelimiter": { - // "description": "If true, objects that end in exactly one instance of delimiter will have their metadata included in items in addition to prefixes.", - // "location": "query", - // "type": "boolean" - // }, - // "maxResults": { - // "default": "1000", - // "description": "Maximum number of items plus prefixes to return in a single page of responses. As duplicate prefixes are omitted, fewer total results may be returned than requested. The service will use this parameter or 1,000 items, whichever is smaller.", - // "format": "uint32", + // "pageSize": { + // "description": "Maximum number of items to return in a single page of responses. Fewer total results may be returned than requested. The service uses this parameter or 100 items, whichever is smaller.", + // "format": "int32", // "location": "query", // "minimum": "0", // "type": "integer" @@ -12231,48 +16124,11 @@ func (c *ObjectsWatchAllCall) Do(opts ...googleapi.CallOption) (*Channel, error) // "description": "A previously-returned page token representing part of the larger set of results to view.", // "location": "query", // "type": "string" - // }, - // "prefix": { - // "description": "Filter results to objects whose names begin with this prefix.", - // "location": "query", - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. 
Defaults to noAcl.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit the owner, acl property." - // ], - // "location": "query", - // "type": "string" - // }, - // "startOffset": { - // "description": "Filter results to objects whose names are lexicographically equal to or after startOffset. If endOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", - // "location": "query", - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // }, - // "versions": { - // "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.", - // "location": "query", - // "type": "boolean" // } // }, - // "path": "b/{bucket}/o/watch", - // "request": { - // "$ref": "Channel", - // "parameterName": "resource" - // }, + // "path": "b/{bucket}/operations", // "response": { - // "$ref": "Channel" + // "$ref": "GoogleLongrunningListOperationsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -12280,12 +16136,32 @@ func (c *ObjectsWatchAllCall) Do(opts ...googleapi.CallOption) (*Channel, error) // "https://www.googleapis.com/auth/devstorage.full_control", // "https://www.googleapis.com/auth/devstorage.read_only", // "https://www.googleapis.com/auth/devstorage.read_write" - // ], - // "supportsSubscription": true + // ] // } } +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *OperationsListCall) Pages(ctx context.Context, f func(*GoogleLongrunningListOperationsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + // method id "storage.projects.hmacKeys.create": type ProjectsHmacKeysCreateCall struct { diff --git a/vendor/google.golang.org/api/transport/grpc/dial.go b/vendor/google.golang.org/api/transport/grpc/dial.go index e1403e08ee68d..87a22f75863c1 100644 --- a/vendor/google.golang.org/api/transport/grpc/dial.go +++ b/vendor/google.golang.org/api/transport/grpc/dial.go @@ -14,10 +14,12 @@ import ( "net" "os" "strings" + "time" "cloud.google.com/go/compute/metadata" "go.opencensus.io/plugin/ocgrpc" "golang.org/x/oauth2" + "golang.org/x/time/rate" "google.golang.org/api/internal" "google.golang.org/api/option" "google.golang.org/grpc" @@ -35,12 +37,12 @@ const disableDirectPath = "GOOGLE_CLOUD_DISABLE_DIRECT_PATH" // Check env to decide if using google-c2p resolver for DirectPath traffic. const enableDirectPathXds = "GOOGLE_CLOUD_ENABLE_DIRECT_PATH_XDS" -// Set at init time by dial_appengine.go. If nil, we're not on App Engine. -var appengineDialerHook func(context.Context) grpc.DialOption - // Set at init time by dial_socketopt.go. If nil, socketopt is not supported. 
var timeoutDialerOption grpc.DialOption +// Log rate limiter +var logRateLimiter = rate.Sometimes{Interval: 1 * time.Second} + // Dial returns a GRPC connection for use communicating with a Google cloud // service, configured with the given ClientOptions. func Dial(ctx context.Context, opts ...option.ClientOption) (*grpc.ClientConn, error) { @@ -156,6 +158,9 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C ) // Attempt Direct Path: + logRateLimiter.Do(func() { + logDirectPathMisconfig(endpoint, creds.TokenSource, o) + }) if isDirectPathEnabled(endpoint, o) && isTokenSourceDirectPathCompatible(creds.TokenSource, o) && metadata.OnGCE() { // Overwrite all of the previously specific DialOptions, DirectPath uses its own set of credentials and certificates. grpcOpts = []grpc.DialOption{ @@ -186,12 +191,6 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C } } - if appengineDialerHook != nil { - // Use the Socket API on App Engine. - // appengine dialer will override socketopt dialer - grpcOpts = append(grpcOpts, appengineDialerHook(ctx)) - } - // Add tracing, but before the other options, so that clients can override the // gRPC stats handler. // This assumes that gRPC options are processed in order, left to right. @@ -305,6 +304,24 @@ func checkDirectPathEndPoint(endpoint string) bool { return true } +func logDirectPathMisconfig(endpoint string, ts oauth2.TokenSource, o *internal.DialSettings) { + if isDirectPathXdsUsed(o) { + // Case 1: does not enable DirectPath + if !isDirectPathEnabled(endpoint, o) { + log.Println("WARNING: DirectPath is misconfigured. Please set the EnableDirectPath option along with the EnableDirectPathXds option.") + } else { + // Case 2: credential is not correctly set + if !isTokenSourceDirectPathCompatible(ts, o) { + log.Println("WARNING: DirectPath is misconfigured. Please make sure the token source is fetched from GCE metadata server and the default service account is used.") + } + // Case 3: not running on GCE + if !metadata.OnGCE() { + log.Println("WARNING: DirectPath is misconfigured. DirectPath is only available in a GCE environment.") + } + } + } +} + func processAndValidateOpts(opts []option.ClientOption) (*internal.DialSettings, error) { var o internal.DialSettings for _, opt := range opts { diff --git a/vendor/google.golang.org/api/transport/grpc/dial_appengine.go b/vendor/google.golang.org/api/transport/grpc/dial_appengine.go deleted file mode 100644 index fd3dc0565d038..0000000000000 --- a/vendor/google.golang.org/api/transport/grpc/dial_appengine.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2016 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build appengine -// +build appengine - -package grpc - -import ( - "context" - "net" - "time" - - "google.golang.org/appengine" - "google.golang.org/appengine/socket" - "google.golang.org/grpc" -) - -func init() { - // NOTE: dev_appserver doesn't currently support SSL. - // When it does, this code can be removed. 
- if appengine.IsDevAppServer() { - return - } - - appengineDialerHook = func(ctx context.Context) grpc.DialOption { - return grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) { - return socket.DialTimeout(ctx, "tcp", addr, timeout) - }) - } -} diff --git a/vendor/google.golang.org/api/transport/http/dial.go b/vendor/google.golang.org/api/transport/http/dial.go index eca0c3ba795ab..a07362ffdbd38 100644 --- a/vendor/google.golang.org/api/transport/http/dial.go +++ b/vendor/google.golang.org/api/transport/http/dial.go @@ -145,22 +145,13 @@ func (t *parameterTransport) RoundTrip(req *http.Request) (*http.Response, error return rt.RoundTrip(&newReq) } -// Set at init time by dial_appengine.go. If nil, we're not on App Engine. -var appengineUrlfetchHook func(context.Context) http.RoundTripper - -// defaultBaseTransport returns the base HTTP transport. -// On App Engine, this is urlfetch.Transport. -// Otherwise, use a default transport, taking most defaults from -// http.DefaultTransport. +// defaultBaseTransport returns the base HTTP transport. It uses a default +// transport, taking most defaults from http.DefaultTransport. // If TLSCertificate is available, set TLSClientConfig as well. func defaultBaseTransport(ctx context.Context, clientCertSource cert.Source, dialTLSContext func(context.Context, string, string) (net.Conn, error)) http.RoundTripper { - if appengineUrlfetchHook != nil { - return appengineUrlfetchHook(ctx) - } - // Copy http.DefaultTransport except for MaxIdleConnsPerHost setting, - // which is increased due to reported performance issues under load in the GCS - // client. Transport.Clone is only available in Go 1.13 and up. + // which is increased due to reported performance issues under load in the + // GCS client. Transport.Clone is only available in Go 1.13 and up. trans := clonedTransport(http.DefaultTransport) if trans == nil { trans = fallbackBaseTransport() diff --git a/vendor/google.golang.org/api/transport/http/dial_appengine.go b/vendor/google.golang.org/api/transport/http/dial_appengine.go deleted file mode 100644 index f064e133f71d3..0000000000000 --- a/vendor/google.golang.org/api/transport/http/dial_appengine.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2016 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build appengine -// +build appengine - -package http - -import ( - "context" - "net/http" - - "google.golang.org/appengine/urlfetch" -) - -func init() { - appengineUrlfetchHook = func(ctx context.Context) http.RoundTripper { - return &urlfetch.Transport{Context: ctx} - } -} diff --git a/vendor/google.golang.org/appengine/.travis.yml b/vendor/google.golang.org/appengine/.travis.yml deleted file mode 100644 index 6d03f4d36e8ca..0000000000000 --- a/vendor/google.golang.org/appengine/.travis.yml +++ /dev/null @@ -1,18 +0,0 @@ -language: go - -go_import_path: google.golang.org/appengine - -install: - - ./travis_install.sh - -script: - - ./travis_test.sh - -matrix: - include: - - go: 1.9.x - env: GOAPP=true - - go: 1.10.x - env: GOAPP=false - - go: 1.11.x - env: GO111MODULE=on diff --git a/vendor/google.golang.org/appengine/CONTRIBUTING.md b/vendor/google.golang.org/appengine/CONTRIBUTING.md index ffc298520856c..289693613cccd 100644 --- a/vendor/google.golang.org/appengine/CONTRIBUTING.md +++ b/vendor/google.golang.org/appengine/CONTRIBUTING.md @@ -19,14 +19,12 @@ ## Running system tests -Download and install the [Go App Engine SDK](https://cloud.google.com/appengine/docs/go/download). Make sure the `go_appengine` dir is in your `PATH`. - Set the `APPENGINE_DEV_APPSERVER` environment variable to `/path/to/go_appengine/dev_appserver.py`. -Run tests with `goapp test`: +Run tests with `go test`: ``` -goapp test -v google.golang.org/appengine/... +go test -v google.golang.org/appengine/... ``` ## Contributor License Agreements diff --git a/vendor/google.golang.org/appengine/README.md b/vendor/google.golang.org/appengine/README.md index 9fdbacd3c6016..5ccddd9990dd8 100644 --- a/vendor/google.golang.org/appengine/README.md +++ b/vendor/google.golang.org/appengine/README.md @@ -1,6 +1,6 @@ # Go App Engine packages -[![Build Status](https://travis-ci.org/golang/appengine.svg)](https://travis-ci.org/golang/appengine) +[![CI Status](https://github.com/golang/appengine/actions/workflows/ci.yml/badge.svg)](https://github.com/golang/appengine/actions/workflows/ci.yml) This repository supports the Go runtime on *App Engine standard*. It provides APIs for interacting with App Engine services. @@ -51,7 +51,7 @@ code importing `appengine/datastore` will now need to import `google.golang.org/ Most App Engine services are available with exactly the same API. A few APIs were cleaned up, and there are some differences: -* `appengine.Context` has been replaced with the `Context` type from `golang.org/x/net/context`. +* `appengine.Context` has been replaced with the `Context` type from `context`. * Logging methods that were on `appengine.Context` are now functions in `google.golang.org/appengine/log`. * `appengine.Timeout` has been removed. Use `context.WithTimeout` instead. * `appengine.Datacenter` now takes a `context.Context` argument. @@ -72,7 +72,7 @@ A few APIs were cleaned up, and there are some differences: * `appengine/socket` is not required on App Engine flexible environment / Managed VMs. Use the standard `net` package instead. -## Key Encode/Decode compatibiltiy to help with datastore library migrations +## Key Encode/Decode compatibility to help with datastore library migrations Key compatibility updates have been added to help customers transition from google.golang.org/appengine/datastore to cloud.google.com/go/datastore. The `EnableKeyConversion` enables automatic conversion from a key encoded with cloud.google.com/go/datastore to google.golang.org/appengine/datastore key type. 
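The API differences listed in the vendored README above can be pulled together in one place. A minimal sketch, assuming a trivial HTTP app, of a handler written against the migrated surface: the standard `context` package, `context.WithTimeout` in place of the removed `appengine.Timeout`, and logging through `google.golang.org/appengine/log`. The handler name, route, and 5-second deadline are illustrative only.

```
package main

import (
	"context"
	"net/http"
	"time"

	"google.golang.org/appengine"
	"google.golang.org/appengine/log"
)

// handler uses the migrated API surface: the request context comes from
// appengine.NewContext, logging goes through the log package, and the
// removed appengine.Timeout is replaced by context.WithTimeout.
func handler(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)

	// Bound downstream work with a deadline instead of appengine.Timeout.
	ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()

	log.Infof(ctx, "serving %s", r.URL.Path)
	w.Write([]byte("ok"))
}

func main() {
	http.HandleFunc("/", handler)
	appengine.Main()
}
```

With the vendored changes further down, `appengine.Main` serves `http.DefaultServeMux` wrapped in the newly exported `Middleware`, so handlers registered this way retain access to the GAE APIs.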
diff --git a/vendor/google.golang.org/appengine/appengine.go b/vendor/google.golang.org/appengine/appengine.go index 8c9697674f20d..35ba9c896766d 100644 --- a/vendor/google.golang.org/appengine/appengine.go +++ b/vendor/google.golang.org/appengine/appengine.go @@ -9,10 +9,10 @@ package appengine // import "google.golang.org/appengine" import ( + "context" "net/http" "github.com/golang/protobuf/proto" - "golang.org/x/net/context" "google.golang.org/appengine/internal" ) @@ -35,18 +35,18 @@ import ( // // Main is designed so that the app's main package looks like this: // -// package main +// package main // -// import ( -// "google.golang.org/appengine" +// import ( +// "google.golang.org/appengine" // -// _ "myapp/package0" -// _ "myapp/package1" -// ) +// _ "myapp/package0" +// _ "myapp/package1" +// ) // -// func main() { -// appengine.Main() -// } +// func main() { +// appengine.Main() +// } // // The "myapp/packageX" packages are expected to register HTTP handlers // in their init functions. @@ -54,6 +54,9 @@ func Main() { internal.Main() } +// Middleware wraps an http handler so that it can make GAE API calls +var Middleware func(http.Handler) http.Handler = internal.Middleware + // IsDevAppServer reports whether the App Engine app is running in the // development App Server. func IsDevAppServer() bool { diff --git a/vendor/google.golang.org/appengine/appengine_vm.go b/vendor/google.golang.org/appengine/appengine_vm.go index f4b645aad3bec..6e1d041cd95be 100644 --- a/vendor/google.golang.org/appengine/appengine_vm.go +++ b/vendor/google.golang.org/appengine/appengine_vm.go @@ -2,19 +2,19 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. +//go:build !appengine // +build !appengine package appengine import ( - "golang.org/x/net/context" - - "google.golang.org/appengine/internal" + "context" ) // BackgroundContext returns a context not associated with a request. -// This should only be used when not servicing a request. -// This only works in App Engine "flexible environment". +// +// Deprecated: App Engine no longer has a special background context. +// Just use context.Background(). func BackgroundContext() context.Context { - return internal.BackgroundContext() + return context.Background() } diff --git a/vendor/google.golang.org/appengine/identity.go b/vendor/google.golang.org/appengine/identity.go index b8dcf8f361989..1202fc1a5317c 100644 --- a/vendor/google.golang.org/appengine/identity.go +++ b/vendor/google.golang.org/appengine/identity.go @@ -5,10 +5,9 @@ package appengine import ( + "context" "time" - "golang.org/x/net/context" - "google.golang.org/appengine/internal" pb "google.golang.org/appengine/internal/app_identity" modpb "google.golang.org/appengine/internal/modules" diff --git a/vendor/google.golang.org/appengine/internal/api.go b/vendor/google.golang.org/appengine/internal/api.go index 721053c20a1b1..0569f5dd43e49 100644 --- a/vendor/google.golang.org/appengine/internal/api.go +++ b/vendor/google.golang.org/appengine/internal/api.go @@ -2,12 +2,14 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. 
+//go:build !appengine // +build !appengine package internal import ( "bytes" + "context" "errors" "fmt" "io/ioutil" @@ -24,7 +26,6 @@ import ( "time" "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" basepb "google.golang.org/appengine/internal/base" logpb "google.golang.org/appengine/internal/log" @@ -32,8 +33,7 @@ import ( ) const ( - apiPath = "/rpc_http" - defaultTicketSuffix = "/default.20150612t184001.0" + apiPath = "/rpc_http" ) var ( @@ -65,21 +65,22 @@ var ( IdleConnTimeout: 90 * time.Second, }, } - - defaultTicketOnce sync.Once - defaultTicket string - backgroundContextOnce sync.Once - backgroundContext netcontext.Context ) -func apiURL() *url.URL { +func apiURL(ctx context.Context) *url.URL { host, port := "appengine.googleapis.internal", "10001" if h := os.Getenv("API_HOST"); h != "" { host = h } + if hostOverride := ctx.Value(apiHostOverrideKey); hostOverride != nil { + host = hostOverride.(string) + } if p := os.Getenv("API_PORT"); p != "" { port = p } + if portOverride := ctx.Value(apiPortOverrideKey); portOverride != nil { + port = portOverride.(string) + } return &url.URL{ Scheme: "http", Host: host + ":" + port, @@ -87,82 +88,97 @@ func apiURL() *url.URL { } } -func handleHTTP(w http.ResponseWriter, r *http.Request) { - c := &context{ - req: r, - outHeader: w.Header(), - apiURL: apiURL(), - } - r = r.WithContext(withContext(r.Context(), c)) - c.req = r - - stopFlushing := make(chan int) +// Middleware wraps an http handler so that it can make GAE API calls +func Middleware(next http.Handler) http.Handler { + return handleHTTPMiddleware(executeRequestSafelyMiddleware(next)) +} - // Patch up RemoteAddr so it looks reasonable. - if addr := r.Header.Get(userIPHeader); addr != "" { - r.RemoteAddr = addr - } else if addr = r.Header.Get(remoteAddrHeader); addr != "" { - r.RemoteAddr = addr - } else { - // Should not normally reach here, but pick a sensible default anyway. - r.RemoteAddr = "127.0.0.1" - } - // The address in the headers will most likely be of these forms: - // 123.123.123.123 - // 2001:db8::1 - // net/http.Request.RemoteAddr is specified to be in "IP:port" form. - if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil { - // Assume the remote address is only a host; add a default port. - r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80") - } +func handleHTTPMiddleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + c := &aeContext{ + req: r, + outHeader: w.Header(), + } + r = r.WithContext(withContext(r.Context(), c)) + c.req = r + + stopFlushing := make(chan int) + + // Patch up RemoteAddr so it looks reasonable. + if addr := r.Header.Get(userIPHeader); addr != "" { + r.RemoteAddr = addr + } else if addr = r.Header.Get(remoteAddrHeader); addr != "" { + r.RemoteAddr = addr + } else { + // Should not normally reach here, but pick a sensible default anyway. + r.RemoteAddr = "127.0.0.1" + } + // The address in the headers will most likely be of these forms: + // 123.123.123.123 + // 2001:db8::1 + // net/http.Request.RemoteAddr is specified to be in "IP:port" form. + if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil { + // Assume the remote address is only a host; add a default port. + r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80") + } - // Start goroutine responsible for flushing app logs. - // This is done after adding c to ctx.m (and stopped before removing it) - // because flushing logs requires making an API call. 
- go c.logFlusher(stopFlushing) + if logToLogservice() { + // Start goroutine responsible for flushing app logs. + // This is done after adding c to ctx.m (and stopped before removing it) + // because flushing logs requires making an API call. + go c.logFlusher(stopFlushing) + } - executeRequestSafely(c, r) - c.outHeader = nil // make sure header changes aren't respected any more + next.ServeHTTP(c, r) + c.outHeader = nil // make sure header changes aren't respected any more - stopFlushing <- 1 // any logging beyond this point will be dropped + flushed := make(chan struct{}) + if logToLogservice() { + stopFlushing <- 1 // any logging beyond this point will be dropped - // Flush any pending logs asynchronously. - c.pendingLogs.Lock() - flushes := c.pendingLogs.flushes - if len(c.pendingLogs.lines) > 0 { - flushes++ - } - c.pendingLogs.Unlock() - flushed := make(chan struct{}) - go func() { - defer close(flushed) - // Force a log flush, because with very short requests we - // may not ever flush logs. - c.flushLog(true) - }() - w.Header().Set(logFlushHeader, strconv.Itoa(flushes)) + // Flush any pending logs asynchronously. + c.pendingLogs.Lock() + flushes := c.pendingLogs.flushes + if len(c.pendingLogs.lines) > 0 { + flushes++ + } + c.pendingLogs.Unlock() + go func() { + defer close(flushed) + // Force a log flush, because with very short requests we + // may not ever flush logs. + c.flushLog(true) + }() + w.Header().Set(logFlushHeader, strconv.Itoa(flushes)) + } - // Avoid nil Write call if c.Write is never called. - if c.outCode != 0 { - w.WriteHeader(c.outCode) - } - if c.outBody != nil { - w.Write(c.outBody) - } - // Wait for the last flush to complete before returning, - // otherwise the security ticket will not be valid. - <-flushed + // Avoid nil Write call if c.Write is never called. + if c.outCode != 0 { + w.WriteHeader(c.outCode) + } + if c.outBody != nil { + w.Write(c.outBody) + } + if logToLogservice() { + // Wait for the last flush to complete before returning, + // otherwise the security ticket will not be valid. + <-flushed + } + }) } -func executeRequestSafely(c *context, r *http.Request) { - defer func() { - if x := recover(); x != nil { - logf(c, 4, "%s", renderPanic(x)) // 4 == critical - c.outCode = 500 - } - }() +func executeRequestSafelyMiddleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer func() { + if x := recover(); x != nil { + c := w.(*aeContext) + logf(c, 4, "%s", renderPanic(x)) // 4 == critical + c.outCode = 500 + } + }() - http.DefaultServeMux.ServeHTTP(c, r) + next.ServeHTTP(w, r) + }) } func renderPanic(x interface{}) string { @@ -204,9 +220,9 @@ func renderPanic(x interface{}) string { return string(buf) } -// context represents the context of an in-flight HTTP request. +// aeContext represents the aeContext of an in-flight HTTP request. // It implements the appengine.Context and http.ResponseWriter interfaces. -type context struct { +type aeContext struct { req *http.Request outCode int @@ -218,8 +234,6 @@ type context struct { lines []*logpb.UserAppLogLine flushes int } - - apiURL *url.URL } var contextKey = "holds a *context" @@ -227,8 +241,8 @@ var contextKey = "holds a *context" // jointContext joins two contexts in a superficial way. // It takes values and timeouts from a base context, and only values from another context. 
type jointContext struct { - base netcontext.Context - valuesOnly netcontext.Context + base context.Context + valuesOnly context.Context } func (c jointContext) Deadline() (time.Time, bool) { @@ -252,94 +266,54 @@ func (c jointContext) Value(key interface{}) interface{} { // fromContext returns the App Engine context or nil if ctx is not // derived from an App Engine context. -func fromContext(ctx netcontext.Context) *context { - c, _ := ctx.Value(&contextKey).(*context) +func fromContext(ctx context.Context) *aeContext { + c, _ := ctx.Value(&contextKey).(*aeContext) return c } -func withContext(parent netcontext.Context, c *context) netcontext.Context { - ctx := netcontext.WithValue(parent, &contextKey, c) +func withContext(parent context.Context, c *aeContext) context.Context { + ctx := context.WithValue(parent, &contextKey, c) if ns := c.req.Header.Get(curNamespaceHeader); ns != "" { ctx = withNamespace(ctx, ns) } return ctx } -func toContext(c *context) netcontext.Context { - return withContext(netcontext.Background(), c) +func toContext(c *aeContext) context.Context { + return withContext(context.Background(), c) } -func IncomingHeaders(ctx netcontext.Context) http.Header { +func IncomingHeaders(ctx context.Context) http.Header { if c := fromContext(ctx); c != nil { return c.req.Header } return nil } -func ReqContext(req *http.Request) netcontext.Context { +func ReqContext(req *http.Request) context.Context { return req.Context() } -func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { +func WithContext(parent context.Context, req *http.Request) context.Context { return jointContext{ base: parent, valuesOnly: req.Context(), } } -// DefaultTicket returns a ticket used for background context or dev_appserver. -func DefaultTicket() string { - defaultTicketOnce.Do(func() { - if IsDevAppServer() { - defaultTicket = "testapp" + defaultTicketSuffix - return - } - appID := partitionlessAppID() - escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1) - majVersion := VersionID(nil) - if i := strings.Index(majVersion, "."); i > 0 { - majVersion = majVersion[:i] - } - defaultTicket = fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID()) - }) - return defaultTicket -} - -func BackgroundContext() netcontext.Context { - backgroundContextOnce.Do(func() { - // Compute background security ticket. - ticket := DefaultTicket() - - c := &context{ - req: &http.Request{ - Header: http.Header{ - ticketHeader: []string{ticket}, - }, - }, - apiURL: apiURL(), - } - backgroundContext = toContext(c) - - // TODO(dsymonds): Wire up the shutdown handler to do a final flush. - go c.logFlusher(make(chan int)) - }) - - return backgroundContext -} - // RegisterTestRequest registers the HTTP request req for testing, such that -// any API calls are sent to the provided URL. It returns a closure to delete -// the registration. +// any API calls are sent to the provided URL. // It should only be used by aetest package. 
-func RegisterTestRequest(req *http.Request, apiURL *url.URL, decorate func(netcontext.Context) netcontext.Context) (*http.Request, func()) { - c := &context{ - req: req, - apiURL: apiURL, - } - ctx := withContext(decorate(req.Context()), c) - req = req.WithContext(ctx) - c.req = req - return req, func() {} +func RegisterTestRequest(req *http.Request, apiURL *url.URL, appID string) *http.Request { + ctx := req.Context() + ctx = withAPIHostOverride(ctx, apiURL.Hostname()) + ctx = withAPIPortOverride(ctx, apiURL.Port()) + ctx = WithAppIDOverride(ctx, appID) + + // use the unregistered request as a placeholder so that withContext can read the headers + c := &aeContext{req: req} + c.req = req.WithContext(withContext(ctx, c)) + return c.req } var errTimeout = &CallError{ @@ -348,7 +322,7 @@ var errTimeout = &CallError{ Timeout: true, } -func (c *context) Header() http.Header { return c.outHeader } +func (c *aeContext) Header() http.Header { return c.outHeader } // Copied from $GOROOT/src/pkg/net/http/transfer.go. Some response status // codes do not permit a response body (nor response entity headers such as @@ -365,7 +339,7 @@ func bodyAllowedForStatus(status int) bool { return true } -func (c *context) Write(b []byte) (int, error) { +func (c *aeContext) Write(b []byte) (int, error) { if c.outCode == 0 { c.WriteHeader(http.StatusOK) } @@ -376,7 +350,7 @@ func (c *context) Write(b []byte) (int, error) { return len(b), nil } -func (c *context) WriteHeader(code int) { +func (c *aeContext) WriteHeader(code int) { if c.outCode != 0 { logf(c, 3, "WriteHeader called multiple times on request.") // error level return @@ -384,10 +358,11 @@ func (c *context) WriteHeader(code int) { c.outCode = code } -func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) { +func post(ctx context.Context, body []byte, timeout time.Duration) (b []byte, err error) { + apiURL := apiURL(ctx) hreq := &http.Request{ Method: "POST", - URL: c.apiURL, + URL: apiURL, Header: http.Header{ apiEndpointHeader: apiEndpointHeaderValue, apiMethodHeader: apiMethodHeaderValue, @@ -396,13 +371,16 @@ func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) }, Body: ioutil.NopCloser(bytes.NewReader(body)), ContentLength: int64(len(body)), - Host: c.apiURL.Host, - } - if info := c.req.Header.Get(dapperHeader); info != "" { - hreq.Header.Set(dapperHeader, info) + Host: apiURL.Host, } - if info := c.req.Header.Get(traceHeader); info != "" { - hreq.Header.Set(traceHeader, info) + c := fromContext(ctx) + if c != nil { + if info := c.req.Header.Get(dapperHeader); info != "" { + hreq.Header.Set(dapperHeader, info) + } + if info := c.req.Header.Get(traceHeader); info != "" { + hreq.Header.Set(traceHeader, info) + } } tr := apiHTTPClient.Transport.(*http.Transport) @@ -444,7 +422,7 @@ func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) return hrespBody, nil } -func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error { +func Call(ctx context.Context, service, method string, in, out proto.Message) error { if ns := NamespaceFromContext(ctx); ns != "" { if fn, ok := NamespaceMods[service]; ok { fn(in, ns) @@ -463,15 +441,11 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) } c := fromContext(ctx) - if c == nil { - // Give a good error message rather than a panic lower down. - return errNotAppEngineContext - } // Apply transaction modifications if we're in a transaction. 
if t := transactionFromContext(ctx); t != nil { if t.finished { - return errors.New("transaction context has expired") + return errors.New("transaction aeContext has expired") } applyTransaction(in, &t.transaction) } @@ -487,20 +461,13 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) return err } - ticket := c.req.Header.Get(ticketHeader) - // Use a test ticket under test environment. - if ticket == "" { - if appid := ctx.Value(&appIDOverrideKey); appid != nil { - ticket = appid.(string) + defaultTicketSuffix + ticket := "" + if c != nil { + ticket = c.req.Header.Get(ticketHeader) + if dri := c.req.Header.Get(devRequestIdHeader); IsDevAppServer() && dri != "" { + ticket = dri } } - // Fall back to use background ticket when the request ticket is not available in Flex or dev_appserver. - if ticket == "" { - ticket = DefaultTicket() - } - if dri := c.req.Header.Get(devRequestIdHeader); IsDevAppServer() && dri != "" { - ticket = dri - } req := &remotepb.Request{ ServiceName: &service, Method: &method, @@ -512,7 +479,7 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) return err } - hrespBody, err := c.post(hreqBody, timeout) + hrespBody, err := post(ctx, hreqBody, timeout) if err != nil { return err } @@ -549,11 +516,11 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) return proto.Unmarshal(res.Response, out) } -func (c *context) Request() *http.Request { +func (c *aeContext) Request() *http.Request { return c.req } -func (c *context) addLogLine(ll *logpb.UserAppLogLine) { +func (c *aeContext) addLogLine(ll *logpb.UserAppLogLine) { // Truncate long log lines. // TODO(dsymonds): Check if this is still necessary. const lim = 8 << 10 @@ -575,18 +542,20 @@ var logLevelName = map[int64]string{ 4: "CRITICAL", } -func logf(c *context, level int64, format string, args ...interface{}) { +func logf(c *aeContext, level int64, format string, args ...interface{}) { if c == nil { - panic("not an App Engine context") + panic("not an App Engine aeContext") } s := fmt.Sprintf(format, args...) s = strings.TrimRight(s, "\n") // Remove any trailing newline characters. - c.addLogLine(&logpb.UserAppLogLine{ - TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3), - Level: &level, - Message: &s, - }) - // Only duplicate log to stderr if not running on App Engine second generation + if logToLogservice() { + c.addLogLine(&logpb.UserAppLogLine{ + TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3), + Level: &level, + Message: &s, + }) + } + // Log to stdout if not deployed if !IsSecondGen() { log.Print(logLevelName[level] + ": " + s) } @@ -594,7 +563,7 @@ func logf(c *context, level int64, format string, args ...interface{}) { // flushLog attempts to flush any pending logs to the appserver. // It should not be called concurrently. -func (c *context) flushLog(force bool) (flushed bool) { +func (c *aeContext) flushLog(force bool) (flushed bool) { c.pendingLogs.Lock() // Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious. 
n, rem := 0, 30<<20 @@ -655,7 +624,7 @@ const ( forceFlushInterval = 60 * time.Second ) -func (c *context) logFlusher(stop <-chan int) { +func (c *aeContext) logFlusher(stop <-chan int) { lastFlush := time.Now() tick := time.NewTicker(flushInterval) for { @@ -673,6 +642,12 @@ func (c *context) logFlusher(stop <-chan int) { } } -func ContextForTesting(req *http.Request) netcontext.Context { - return toContext(&context{req: req}) +func ContextForTesting(req *http.Request) context.Context { + return toContext(&aeContext{req: req}) +} + +func logToLogservice() bool { + // TODO: replace logservice with json structured logs to $LOG_DIR/app.log.json + // where $LOG_DIR is /var/log in prod and some tmpdir in dev + return os.Getenv("LOG_TO_LOGSERVICE") != "0" } diff --git a/vendor/google.golang.org/appengine/internal/api_classic.go b/vendor/google.golang.org/appengine/internal/api_classic.go index f0f40b2e35c2c..87c33c798e879 100644 --- a/vendor/google.golang.org/appengine/internal/api_classic.go +++ b/vendor/google.golang.org/appengine/internal/api_classic.go @@ -2,11 +2,13 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. +//go:build appengine // +build appengine package internal import ( + "context" "errors" "fmt" "net/http" @@ -17,20 +19,19 @@ import ( basepb "appengine_internal/base" "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" ) var contextKey = "holds an appengine.Context" // fromContext returns the App Engine context or nil if ctx is not // derived from an App Engine context. -func fromContext(ctx netcontext.Context) appengine.Context { +func fromContext(ctx context.Context) appengine.Context { c, _ := ctx.Value(&contextKey).(appengine.Context) return c } // This is only for classic App Engine adapters. 
-func ClassicContextFromContext(ctx netcontext.Context) (appengine.Context, error) { +func ClassicContextFromContext(ctx context.Context) (appengine.Context, error) { c := fromContext(ctx) if c == nil { return nil, errNotAppEngineContext @@ -38,8 +39,8 @@ func ClassicContextFromContext(ctx netcontext.Context) (appengine.Context, error return c, nil } -func withContext(parent netcontext.Context, c appengine.Context) netcontext.Context { - ctx := netcontext.WithValue(parent, &contextKey, c) +func withContext(parent context.Context, c appengine.Context) context.Context { + ctx := context.WithValue(parent, &contextKey, c) s := &basepb.StringProto{} c.Call("__go__", "GetNamespace", &basepb.VoidProto{}, s, nil) @@ -50,7 +51,7 @@ func withContext(parent netcontext.Context, c appengine.Context) netcontext.Cont return ctx } -func IncomingHeaders(ctx netcontext.Context) http.Header { +func IncomingHeaders(ctx context.Context) http.Header { if c := fromContext(ctx); c != nil { if req, ok := c.Request().(*http.Request); ok { return req.Header @@ -59,11 +60,11 @@ func IncomingHeaders(ctx netcontext.Context) http.Header { return nil } -func ReqContext(req *http.Request) netcontext.Context { - return WithContext(netcontext.Background(), req) +func ReqContext(req *http.Request) context.Context { + return WithContext(context.Background(), req) } -func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { +func WithContext(parent context.Context, req *http.Request) context.Context { c := appengine.NewContext(req) return withContext(parent, c) } @@ -83,11 +84,11 @@ func (t *testingContext) Call(service, method string, _, _ appengine_internal.Pr } func (t *testingContext) Request() interface{} { return t.req } -func ContextForTesting(req *http.Request) netcontext.Context { - return withContext(netcontext.Background(), &testingContext{req: req}) +func ContextForTesting(req *http.Request) context.Context { + return withContext(context.Background(), &testingContext{req: req}) } -func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error { +func Call(ctx context.Context, service, method string, in, out proto.Message) error { if ns := NamespaceFromContext(ctx); ns != "" { if fn, ok := NamespaceMods[service]; ok { fn(in, ns) @@ -144,8 +145,8 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) return err } -func handleHTTP(w http.ResponseWriter, r *http.Request) { - panic("handleHTTP called; this should be impossible") +func Middleware(next http.Handler) http.Handler { + panic("Middleware called; this should be impossible") } func logf(c appengine.Context, level int64, format string, args ...interface{}) { diff --git a/vendor/google.golang.org/appengine/internal/api_common.go b/vendor/google.golang.org/appengine/internal/api_common.go index e0c0b214b7242..5b95c13d92661 100644 --- a/vendor/google.golang.org/appengine/internal/api_common.go +++ b/vendor/google.golang.org/appengine/internal/api_common.go @@ -5,20 +5,26 @@ package internal import ( + "context" "errors" "os" "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" ) +type ctxKey string + +func (c ctxKey) String() string { + return "appengine context key: " + string(c) +} + var errNotAppEngineContext = errors.New("not an App Engine context") -type CallOverrideFunc func(ctx netcontext.Context, service, method string, in, out proto.Message) error +type CallOverrideFunc func(ctx context.Context, service, method string, in, out proto.Message) error var 
callOverrideKey = "holds []CallOverrideFunc" -func WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Context { +func WithCallOverride(ctx context.Context, f CallOverrideFunc) context.Context { // We avoid appending to any existing call override // so we don't risk overwriting a popped stack below. var cofs []CallOverrideFunc @@ -26,10 +32,10 @@ func WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Con cofs = append(cofs, uf...) } cofs = append(cofs, f) - return netcontext.WithValue(ctx, &callOverrideKey, cofs) + return context.WithValue(ctx, &callOverrideKey, cofs) } -func callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netcontext.Context, bool) { +func callOverrideFromContext(ctx context.Context) (CallOverrideFunc, context.Context, bool) { cofs, _ := ctx.Value(&callOverrideKey).([]CallOverrideFunc) if len(cofs) == 0 { return nil, nil, false @@ -37,7 +43,7 @@ func callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netconte // We found a list of overrides; grab the last, and reconstitute a // context that will hide it. f := cofs[len(cofs)-1] - ctx = netcontext.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1]) + ctx = context.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1]) return f, ctx, true } @@ -45,23 +51,35 @@ type logOverrideFunc func(level int64, format string, args ...interface{}) var logOverrideKey = "holds a logOverrideFunc" -func WithLogOverride(ctx netcontext.Context, f logOverrideFunc) netcontext.Context { - return netcontext.WithValue(ctx, &logOverrideKey, f) +func WithLogOverride(ctx context.Context, f logOverrideFunc) context.Context { + return context.WithValue(ctx, &logOverrideKey, f) } var appIDOverrideKey = "holds a string, being the full app ID" -func WithAppIDOverride(ctx netcontext.Context, appID string) netcontext.Context { - return netcontext.WithValue(ctx, &appIDOverrideKey, appID) +func WithAppIDOverride(ctx context.Context, appID string) context.Context { + return context.WithValue(ctx, &appIDOverrideKey, appID) +} + +var apiHostOverrideKey = ctxKey("holds a string, being the alternate API_HOST") + +func withAPIHostOverride(ctx context.Context, apiHost string) context.Context { + return context.WithValue(ctx, apiHostOverrideKey, apiHost) +} + +var apiPortOverrideKey = ctxKey("holds a string, being the alternate API_PORT") + +func withAPIPortOverride(ctx context.Context, apiPort string) context.Context { + return context.WithValue(ctx, apiPortOverrideKey, apiPort) } var namespaceKey = "holds the namespace string" -func withNamespace(ctx netcontext.Context, ns string) netcontext.Context { - return netcontext.WithValue(ctx, &namespaceKey, ns) +func withNamespace(ctx context.Context, ns string) context.Context { + return context.WithValue(ctx, &namespaceKey, ns) } -func NamespaceFromContext(ctx netcontext.Context) string { +func NamespaceFromContext(ctx context.Context) string { // If there's no namespace, return the empty string. ns, _ := ctx.Value(&namespaceKey).(string) return ns @@ -70,14 +88,14 @@ func NamespaceFromContext(ctx netcontext.Context) string { // FullyQualifiedAppID returns the fully-qualified application ID. // This may contain a partition prefix (e.g. "s~" for High Replication apps), // or a domain prefix (e.g. "example.com:"). 
-func FullyQualifiedAppID(ctx netcontext.Context) string { +func FullyQualifiedAppID(ctx context.Context) string { if id, ok := ctx.Value(&appIDOverrideKey).(string); ok { return id } return fullyQualifiedAppID(ctx) } -func Logf(ctx netcontext.Context, level int64, format string, args ...interface{}) { +func Logf(ctx context.Context, level int64, format string, args ...interface{}) { if f, ok := ctx.Value(&logOverrideKey).(logOverrideFunc); ok { f(level, format, args...) return @@ -90,7 +108,7 @@ func Logf(ctx netcontext.Context, level int64, format string, args ...interface{ } // NamespacedContext wraps a Context to support namespaces. -func NamespacedContext(ctx netcontext.Context, namespace string) netcontext.Context { +func NamespacedContext(ctx context.Context, namespace string) context.Context { return withNamespace(ctx, namespace) } diff --git a/vendor/google.golang.org/appengine/internal/identity.go b/vendor/google.golang.org/appengine/internal/identity.go index 9b4134e425732..0f95aa91d5bc6 100644 --- a/vendor/google.golang.org/appengine/internal/identity.go +++ b/vendor/google.golang.org/appengine/internal/identity.go @@ -5,9 +5,8 @@ package internal import ( + "context" "os" - - netcontext "golang.org/x/net/context" ) var ( @@ -23,7 +22,7 @@ var ( // AppID is the implementation of the wrapper function of the same name in // ../identity.go. See that file for commentary. -func AppID(c netcontext.Context) string { +func AppID(c context.Context) string { return appID(FullyQualifiedAppID(c)) } @@ -35,7 +34,7 @@ func IsStandard() bool { return appengineStandard || IsSecondGen() } -// IsStandard is the implementation of the wrapper function of the same name in +// IsSecondGen is the implementation of the wrapper function of the same name in // ../appengine.go. See that file for commentary. func IsSecondGen() bool { // Second-gen runtimes set $GAE_ENV so we use that to check if we're on a second-gen runtime. diff --git a/vendor/google.golang.org/appengine/internal/identity_classic.go b/vendor/google.golang.org/appengine/internal/identity_classic.go index 4e979f45e34d5..5ad3548bf7416 100644 --- a/vendor/google.golang.org/appengine/internal/identity_classic.go +++ b/vendor/google.golang.org/appengine/internal/identity_classic.go @@ -2,21 +2,22 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. 
+//go:build appengine // +build appengine package internal import ( - "appengine" + "context" - netcontext "golang.org/x/net/context" + "appengine" ) func init() { appengineStandard = true } -func DefaultVersionHostname(ctx netcontext.Context) string { +func DefaultVersionHostname(ctx context.Context) string { c := fromContext(ctx) if c == nil { panic(errNotAppEngineContext) @@ -24,12 +25,12 @@ func DefaultVersionHostname(ctx netcontext.Context) string { return appengine.DefaultVersionHostname(c) } -func Datacenter(_ netcontext.Context) string { return appengine.Datacenter() } -func ServerSoftware() string { return appengine.ServerSoftware() } -func InstanceID() string { return appengine.InstanceID() } -func IsDevAppServer() bool { return appengine.IsDevAppServer() } +func Datacenter(_ context.Context) string { return appengine.Datacenter() } +func ServerSoftware() string { return appengine.ServerSoftware() } +func InstanceID() string { return appengine.InstanceID() } +func IsDevAppServer() bool { return appengine.IsDevAppServer() } -func RequestID(ctx netcontext.Context) string { +func RequestID(ctx context.Context) string { c := fromContext(ctx) if c == nil { panic(errNotAppEngineContext) @@ -37,14 +38,14 @@ func RequestID(ctx netcontext.Context) string { return appengine.RequestID(c) } -func ModuleName(ctx netcontext.Context) string { +func ModuleName(ctx context.Context) string { c := fromContext(ctx) if c == nil { panic(errNotAppEngineContext) } return appengine.ModuleName(c) } -func VersionID(ctx netcontext.Context) string { +func VersionID(ctx context.Context) string { c := fromContext(ctx) if c == nil { panic(errNotAppEngineContext) @@ -52,7 +53,7 @@ func VersionID(ctx netcontext.Context) string { return appengine.VersionID(c) } -func fullyQualifiedAppID(ctx netcontext.Context) string { +func fullyQualifiedAppID(ctx context.Context) string { c := fromContext(ctx) if c == nil { panic(errNotAppEngineContext) diff --git a/vendor/google.golang.org/appengine/internal/identity_flex.go b/vendor/google.golang.org/appengine/internal/identity_flex.go index d5e2e7b5e3f8c..4201b6b585a69 100644 --- a/vendor/google.golang.org/appengine/internal/identity_flex.go +++ b/vendor/google.golang.org/appengine/internal/identity_flex.go @@ -2,6 +2,7 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. +//go:build appenginevm // +build appenginevm package internal diff --git a/vendor/google.golang.org/appengine/internal/identity_vm.go b/vendor/google.golang.org/appengine/internal/identity_vm.go index 5d80672635560..18ddda3a4235e 100644 --- a/vendor/google.golang.org/appengine/internal/identity_vm.go +++ b/vendor/google.golang.org/appengine/internal/identity_vm.go @@ -2,17 +2,17 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. 
+//go:build !appengine // +build !appengine package internal import ( + "context" "log" "net/http" "os" "strings" - - netcontext "golang.org/x/net/context" ) // These functions are implementations of the wrapper functions @@ -24,7 +24,7 @@ const ( hDatacenter = "X-AppEngine-Datacenter" ) -func ctxHeaders(ctx netcontext.Context) http.Header { +func ctxHeaders(ctx context.Context) http.Header { c := fromContext(ctx) if c == nil { return nil @@ -32,15 +32,15 @@ func ctxHeaders(ctx netcontext.Context) http.Header { return c.Request().Header } -func DefaultVersionHostname(ctx netcontext.Context) string { +func DefaultVersionHostname(ctx context.Context) string { return ctxHeaders(ctx).Get(hDefaultVersionHostname) } -func RequestID(ctx netcontext.Context) string { +func RequestID(ctx context.Context) string { return ctxHeaders(ctx).Get(hRequestLogId) } -func Datacenter(ctx netcontext.Context) string { +func Datacenter(ctx context.Context) string { if dc := ctxHeaders(ctx).Get(hDatacenter); dc != "" { return dc } @@ -71,7 +71,7 @@ func ServerSoftware() string { // TODO(dsymonds): Remove the metadata fetches. -func ModuleName(_ netcontext.Context) string { +func ModuleName(_ context.Context) string { if s := os.Getenv("GAE_MODULE_NAME"); s != "" { return s } @@ -81,7 +81,7 @@ func ModuleName(_ netcontext.Context) string { return string(mustGetMetadata("instance/attributes/gae_backend_name")) } -func VersionID(_ netcontext.Context) string { +func VersionID(_ context.Context) string { if s1, s2 := os.Getenv("GAE_MODULE_VERSION"), os.Getenv("GAE_MINOR_VERSION"); s1 != "" && s2 != "" { return s1 + "." + s2 } @@ -112,7 +112,7 @@ func partitionlessAppID() string { return string(mustGetMetadata("instance/attributes/gae_project")) } -func fullyQualifiedAppID(_ netcontext.Context) string { +func fullyQualifiedAppID(_ context.Context) string { if s := os.Getenv("GAE_APPLICATION"); s != "" { return s } @@ -130,5 +130,5 @@ func fullyQualifiedAppID(_ netcontext.Context) string { } func IsDevAppServer() bool { - return os.Getenv("RUN_WITH_DEVAPPSERVER") != "" + return os.Getenv("RUN_WITH_DEVAPPSERVER") != "" || os.Getenv("GAE_ENV") == "localdev" } diff --git a/vendor/google.golang.org/appengine/internal/main.go b/vendor/google.golang.org/appengine/internal/main.go index 1e765312fd180..afd0ae84fdfbf 100644 --- a/vendor/google.golang.org/appengine/internal/main.go +++ b/vendor/google.golang.org/appengine/internal/main.go @@ -2,6 +2,7 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. +//go:build appengine // +build appengine package internal diff --git a/vendor/google.golang.org/appengine/internal/main_vm.go b/vendor/google.golang.org/appengine/internal/main_vm.go index ddb79a333879a..86a8caf06f31a 100644 --- a/vendor/google.golang.org/appengine/internal/main_vm.go +++ b/vendor/google.golang.org/appengine/internal/main_vm.go @@ -2,6 +2,7 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. 
+//go:build !appengine // +build !appengine package internal @@ -29,7 +30,7 @@ func Main() { if IsDevAppServer() { host = "127.0.0.1" } - if err := http.ListenAndServe(host+":"+port, http.HandlerFunc(handleHTTP)); err != nil { + if err := http.ListenAndServe(host+":"+port, Middleware(http.DefaultServeMux)); err != nil { log.Fatalf("http.ListenAndServe: %v", err) } } diff --git a/vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go b/vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go deleted file mode 100644 index 4ec872e4606b7..0000000000000 --- a/vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go +++ /dev/null @@ -1,2822 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google.golang.org/appengine/internal/socket/socket_service.proto - -package socket - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type RemoteSocketServiceError_ErrorCode int32 - -const ( - RemoteSocketServiceError_SYSTEM_ERROR RemoteSocketServiceError_ErrorCode = 1 - RemoteSocketServiceError_GAI_ERROR RemoteSocketServiceError_ErrorCode = 2 - RemoteSocketServiceError_FAILURE RemoteSocketServiceError_ErrorCode = 4 - RemoteSocketServiceError_PERMISSION_DENIED RemoteSocketServiceError_ErrorCode = 5 - RemoteSocketServiceError_INVALID_REQUEST RemoteSocketServiceError_ErrorCode = 6 - RemoteSocketServiceError_SOCKET_CLOSED RemoteSocketServiceError_ErrorCode = 7 -) - -var RemoteSocketServiceError_ErrorCode_name = map[int32]string{ - 1: "SYSTEM_ERROR", - 2: "GAI_ERROR", - 4: "FAILURE", - 5: "PERMISSION_DENIED", - 6: "INVALID_REQUEST", - 7: "SOCKET_CLOSED", -} -var RemoteSocketServiceError_ErrorCode_value = map[string]int32{ - "SYSTEM_ERROR": 1, - "GAI_ERROR": 2, - "FAILURE": 4, - "PERMISSION_DENIED": 5, - "INVALID_REQUEST": 6, - "SOCKET_CLOSED": 7, -} - -func (x RemoteSocketServiceError_ErrorCode) Enum() *RemoteSocketServiceError_ErrorCode { - p := new(RemoteSocketServiceError_ErrorCode) - *p = x - return p -} -func (x RemoteSocketServiceError_ErrorCode) String() string { - return proto.EnumName(RemoteSocketServiceError_ErrorCode_name, int32(x)) -} -func (x *RemoteSocketServiceError_ErrorCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(RemoteSocketServiceError_ErrorCode_value, data, "RemoteSocketServiceError_ErrorCode") - if err != nil { - return err - } - *x = RemoteSocketServiceError_ErrorCode(value) - return nil -} -func (RemoteSocketServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{0, 0} -} - -type RemoteSocketServiceError_SystemError int32 - -const ( - RemoteSocketServiceError_SYS_SUCCESS RemoteSocketServiceError_SystemError = 0 - RemoteSocketServiceError_SYS_EPERM RemoteSocketServiceError_SystemError = 1 - RemoteSocketServiceError_SYS_ENOENT RemoteSocketServiceError_SystemError = 2 - RemoteSocketServiceError_SYS_ESRCH RemoteSocketServiceError_SystemError = 3 - RemoteSocketServiceError_SYS_EINTR RemoteSocketServiceError_SystemError = 4 - 
RemoteSocketServiceError_SYS_EIO RemoteSocketServiceError_SystemError = 5 - RemoteSocketServiceError_SYS_ENXIO RemoteSocketServiceError_SystemError = 6 - RemoteSocketServiceError_SYS_E2BIG RemoteSocketServiceError_SystemError = 7 - RemoteSocketServiceError_SYS_ENOEXEC RemoteSocketServiceError_SystemError = 8 - RemoteSocketServiceError_SYS_EBADF RemoteSocketServiceError_SystemError = 9 - RemoteSocketServiceError_SYS_ECHILD RemoteSocketServiceError_SystemError = 10 - RemoteSocketServiceError_SYS_EAGAIN RemoteSocketServiceError_SystemError = 11 - RemoteSocketServiceError_SYS_EWOULDBLOCK RemoteSocketServiceError_SystemError = 11 - RemoteSocketServiceError_SYS_ENOMEM RemoteSocketServiceError_SystemError = 12 - RemoteSocketServiceError_SYS_EACCES RemoteSocketServiceError_SystemError = 13 - RemoteSocketServiceError_SYS_EFAULT RemoteSocketServiceError_SystemError = 14 - RemoteSocketServiceError_SYS_ENOTBLK RemoteSocketServiceError_SystemError = 15 - RemoteSocketServiceError_SYS_EBUSY RemoteSocketServiceError_SystemError = 16 - RemoteSocketServiceError_SYS_EEXIST RemoteSocketServiceError_SystemError = 17 - RemoteSocketServiceError_SYS_EXDEV RemoteSocketServiceError_SystemError = 18 - RemoteSocketServiceError_SYS_ENODEV RemoteSocketServiceError_SystemError = 19 - RemoteSocketServiceError_SYS_ENOTDIR RemoteSocketServiceError_SystemError = 20 - RemoteSocketServiceError_SYS_EISDIR RemoteSocketServiceError_SystemError = 21 - RemoteSocketServiceError_SYS_EINVAL RemoteSocketServiceError_SystemError = 22 - RemoteSocketServiceError_SYS_ENFILE RemoteSocketServiceError_SystemError = 23 - RemoteSocketServiceError_SYS_EMFILE RemoteSocketServiceError_SystemError = 24 - RemoteSocketServiceError_SYS_ENOTTY RemoteSocketServiceError_SystemError = 25 - RemoteSocketServiceError_SYS_ETXTBSY RemoteSocketServiceError_SystemError = 26 - RemoteSocketServiceError_SYS_EFBIG RemoteSocketServiceError_SystemError = 27 - RemoteSocketServiceError_SYS_ENOSPC RemoteSocketServiceError_SystemError = 28 - RemoteSocketServiceError_SYS_ESPIPE RemoteSocketServiceError_SystemError = 29 - RemoteSocketServiceError_SYS_EROFS RemoteSocketServiceError_SystemError = 30 - RemoteSocketServiceError_SYS_EMLINK RemoteSocketServiceError_SystemError = 31 - RemoteSocketServiceError_SYS_EPIPE RemoteSocketServiceError_SystemError = 32 - RemoteSocketServiceError_SYS_EDOM RemoteSocketServiceError_SystemError = 33 - RemoteSocketServiceError_SYS_ERANGE RemoteSocketServiceError_SystemError = 34 - RemoteSocketServiceError_SYS_EDEADLK RemoteSocketServiceError_SystemError = 35 - RemoteSocketServiceError_SYS_EDEADLOCK RemoteSocketServiceError_SystemError = 35 - RemoteSocketServiceError_SYS_ENAMETOOLONG RemoteSocketServiceError_SystemError = 36 - RemoteSocketServiceError_SYS_ENOLCK RemoteSocketServiceError_SystemError = 37 - RemoteSocketServiceError_SYS_ENOSYS RemoteSocketServiceError_SystemError = 38 - RemoteSocketServiceError_SYS_ENOTEMPTY RemoteSocketServiceError_SystemError = 39 - RemoteSocketServiceError_SYS_ELOOP RemoteSocketServiceError_SystemError = 40 - RemoteSocketServiceError_SYS_ENOMSG RemoteSocketServiceError_SystemError = 42 - RemoteSocketServiceError_SYS_EIDRM RemoteSocketServiceError_SystemError = 43 - RemoteSocketServiceError_SYS_ECHRNG RemoteSocketServiceError_SystemError = 44 - RemoteSocketServiceError_SYS_EL2NSYNC RemoteSocketServiceError_SystemError = 45 - RemoteSocketServiceError_SYS_EL3HLT RemoteSocketServiceError_SystemError = 46 - RemoteSocketServiceError_SYS_EL3RST RemoteSocketServiceError_SystemError = 47 - 
[Diff hunk elided for readability: removed lines from the vendored appengine socket_service generated protobuf code (line breaks were lost in extraction). The removed content comprises the remaining RemoteSocketServiceError_SystemError enum constants (SYS_ELNRNG through SYS_ERFKILL) and their name/value maps; the CreateSocketRequest_SocketFamily, CreateSocketRequest_SocketProtocol, SocketOption_SocketOptionLevel, SocketOption_SocketOptionName, ShutDownRequest_How, ReceiveRequest_Flags, PollEvent_PollEventFlag, and ResolveReply_ErrorCode enums with their name/value maps and generated Enum/String/UnmarshalJSON/EnumDescriptor methods; and the RemoteSocketServiceError, AddressPort, CreateSocketRequest/Reply, BindRequest/Reply, GetSocketNameRequest/Reply, GetPeerNameRequest/Reply, SocketOption, SetSocketOptionsRequest/Reply, GetSocketOptionsRequest/Reply, ConnectRequest/Reply, ListenRequest/Reply, AcceptRequest/Reply, ShutDownRequest/Reply, CloseRequest/Reply, and SendRequest message types with their generated Reset/String/ProtoMessage/Descriptor/XXX_* marshalling methods and field getters.]
= SendRequest{} } -func (m *SendRequest) String() string { return proto.CompactTextString(m) } -func (*SendRequest) ProtoMessage() {} -func (*SendRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{25} -} -func (m *SendRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SendRequest.Unmarshal(m, b) -} -func (m *SendRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SendRequest.Marshal(b, m, deterministic) -} -func (dst *SendRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SendRequest.Merge(dst, src) -} -func (m *SendRequest) XXX_Size() int { - return xxx_messageInfo_SendRequest.Size(m) -} -func (m *SendRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SendRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_SendRequest proto.InternalMessageInfo - -const Default_SendRequest_Flags int32 = 0 -const Default_SendRequest_TimeoutSeconds float64 = -1 - -func (m *SendRequest) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -func (m *SendRequest) GetData() []byte { - if m != nil { - return m.Data - } - return nil -} - -func (m *SendRequest) GetStreamOffset() int64 { - if m != nil && m.StreamOffset != nil { - return *m.StreamOffset - } - return 0 -} - -func (m *SendRequest) GetFlags() int32 { - if m != nil && m.Flags != nil { - return *m.Flags - } - return Default_SendRequest_Flags -} - -func (m *SendRequest) GetSendTo() *AddressPort { - if m != nil { - return m.SendTo - } - return nil -} - -func (m *SendRequest) GetTimeoutSeconds() float64 { - if m != nil && m.TimeoutSeconds != nil { - return *m.TimeoutSeconds - } - return Default_SendRequest_TimeoutSeconds -} - -type SendReply struct { - DataSent *int32 `protobuf:"varint,1,opt,name=data_sent,json=dataSent" json:"data_sent,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SendReply) Reset() { *m = SendReply{} } -func (m *SendReply) String() string { return proto.CompactTextString(m) } -func (*SendReply) ProtoMessage() {} -func (*SendReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{26} -} -func (m *SendReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SendReply.Unmarshal(m, b) -} -func (m *SendReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SendReply.Marshal(b, m, deterministic) -} -func (dst *SendReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_SendReply.Merge(dst, src) -} -func (m *SendReply) XXX_Size() int { - return xxx_messageInfo_SendReply.Size(m) -} -func (m *SendReply) XXX_DiscardUnknown() { - xxx_messageInfo_SendReply.DiscardUnknown(m) -} - -var xxx_messageInfo_SendReply proto.InternalMessageInfo - -func (m *SendReply) GetDataSent() int32 { - if m != nil && m.DataSent != nil { - return *m.DataSent - } - return 0 -} - -type ReceiveRequest struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - DataSize *int32 `protobuf:"varint,2,req,name=data_size,json=dataSize" json:"data_size,omitempty"` - Flags *int32 `protobuf:"varint,3,opt,name=flags,def=0" json:"flags,omitempty"` - TimeoutSeconds *float64 `protobuf:"fixed64,5,opt,name=timeout_seconds,json=timeoutSeconds,def=-1" json:"timeout_seconds,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized 
[]byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ReceiveRequest) Reset() { *m = ReceiveRequest{} } -func (m *ReceiveRequest) String() string { return proto.CompactTextString(m) } -func (*ReceiveRequest) ProtoMessage() {} -func (*ReceiveRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{27} -} -func (m *ReceiveRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ReceiveRequest.Unmarshal(m, b) -} -func (m *ReceiveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ReceiveRequest.Marshal(b, m, deterministic) -} -func (dst *ReceiveRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReceiveRequest.Merge(dst, src) -} -func (m *ReceiveRequest) XXX_Size() int { - return xxx_messageInfo_ReceiveRequest.Size(m) -} -func (m *ReceiveRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ReceiveRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ReceiveRequest proto.InternalMessageInfo - -const Default_ReceiveRequest_Flags int32 = 0 -const Default_ReceiveRequest_TimeoutSeconds float64 = -1 - -func (m *ReceiveRequest) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -func (m *ReceiveRequest) GetDataSize() int32 { - if m != nil && m.DataSize != nil { - return *m.DataSize - } - return 0 -} - -func (m *ReceiveRequest) GetFlags() int32 { - if m != nil && m.Flags != nil { - return *m.Flags - } - return Default_ReceiveRequest_Flags -} - -func (m *ReceiveRequest) GetTimeoutSeconds() float64 { - if m != nil && m.TimeoutSeconds != nil { - return *m.TimeoutSeconds - } - return Default_ReceiveRequest_TimeoutSeconds -} - -type ReceiveReply struct { - StreamOffset *int64 `protobuf:"varint,2,opt,name=stream_offset,json=streamOffset" json:"stream_offset,omitempty"` - Data []byte `protobuf:"bytes,3,opt,name=data" json:"data,omitempty"` - ReceivedFrom *AddressPort `protobuf:"bytes,4,opt,name=received_from,json=receivedFrom" json:"received_from,omitempty"` - BufferSize *int32 `protobuf:"varint,5,opt,name=buffer_size,json=bufferSize" json:"buffer_size,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ReceiveReply) Reset() { *m = ReceiveReply{} } -func (m *ReceiveReply) String() string { return proto.CompactTextString(m) } -func (*ReceiveReply) ProtoMessage() {} -func (*ReceiveReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{28} -} -func (m *ReceiveReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ReceiveReply.Unmarshal(m, b) -} -func (m *ReceiveReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ReceiveReply.Marshal(b, m, deterministic) -} -func (dst *ReceiveReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReceiveReply.Merge(dst, src) -} -func (m *ReceiveReply) XXX_Size() int { - return xxx_messageInfo_ReceiveReply.Size(m) -} -func (m *ReceiveReply) XXX_DiscardUnknown() { - xxx_messageInfo_ReceiveReply.DiscardUnknown(m) -} - -var xxx_messageInfo_ReceiveReply proto.InternalMessageInfo - -func (m *ReceiveReply) GetStreamOffset() int64 { - if m != nil && m.StreamOffset != nil { - return *m.StreamOffset - } - return 0 -} - -func (m *ReceiveReply) GetData() []byte { - if m != nil { - return m.Data - } - return nil -} - -func (m *ReceiveReply) GetReceivedFrom() *AddressPort { - if m != nil { - return m.ReceivedFrom - } - return nil 
-} - -func (m *ReceiveReply) GetBufferSize() int32 { - if m != nil && m.BufferSize != nil { - return *m.BufferSize - } - return 0 -} - -type PollEvent struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - RequestedEvents *int32 `protobuf:"varint,2,req,name=requested_events,json=requestedEvents" json:"requested_events,omitempty"` - ObservedEvents *int32 `protobuf:"varint,3,req,name=observed_events,json=observedEvents" json:"observed_events,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PollEvent) Reset() { *m = PollEvent{} } -func (m *PollEvent) String() string { return proto.CompactTextString(m) } -func (*PollEvent) ProtoMessage() {} -func (*PollEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{29} -} -func (m *PollEvent) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PollEvent.Unmarshal(m, b) -} -func (m *PollEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PollEvent.Marshal(b, m, deterministic) -} -func (dst *PollEvent) XXX_Merge(src proto.Message) { - xxx_messageInfo_PollEvent.Merge(dst, src) -} -func (m *PollEvent) XXX_Size() int { - return xxx_messageInfo_PollEvent.Size(m) -} -func (m *PollEvent) XXX_DiscardUnknown() { - xxx_messageInfo_PollEvent.DiscardUnknown(m) -} - -var xxx_messageInfo_PollEvent proto.InternalMessageInfo - -func (m *PollEvent) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -func (m *PollEvent) GetRequestedEvents() int32 { - if m != nil && m.RequestedEvents != nil { - return *m.RequestedEvents - } - return 0 -} - -func (m *PollEvent) GetObservedEvents() int32 { - if m != nil && m.ObservedEvents != nil { - return *m.ObservedEvents - } - return 0 -} - -type PollRequest struct { - Events []*PollEvent `protobuf:"bytes,1,rep,name=events" json:"events,omitempty"` - TimeoutSeconds *float64 `protobuf:"fixed64,2,opt,name=timeout_seconds,json=timeoutSeconds,def=-1" json:"timeout_seconds,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PollRequest) Reset() { *m = PollRequest{} } -func (m *PollRequest) String() string { return proto.CompactTextString(m) } -func (*PollRequest) ProtoMessage() {} -func (*PollRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{30} -} -func (m *PollRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PollRequest.Unmarshal(m, b) -} -func (m *PollRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PollRequest.Marshal(b, m, deterministic) -} -func (dst *PollRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_PollRequest.Merge(dst, src) -} -func (m *PollRequest) XXX_Size() int { - return xxx_messageInfo_PollRequest.Size(m) -} -func (m *PollRequest) XXX_DiscardUnknown() { - xxx_messageInfo_PollRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_PollRequest proto.InternalMessageInfo - -const Default_PollRequest_TimeoutSeconds float64 = -1 - -func (m *PollRequest) GetEvents() []*PollEvent { - if m != nil { - return m.Events - } - return nil -} - -func (m *PollRequest) GetTimeoutSeconds() float64 { - if m != nil && m.TimeoutSeconds != nil { - return *m.TimeoutSeconds - } - return 
Default_PollRequest_TimeoutSeconds -} - -type PollReply struct { - Events []*PollEvent `protobuf:"bytes,2,rep,name=events" json:"events,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PollReply) Reset() { *m = PollReply{} } -func (m *PollReply) String() string { return proto.CompactTextString(m) } -func (*PollReply) ProtoMessage() {} -func (*PollReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{31} -} -func (m *PollReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PollReply.Unmarshal(m, b) -} -func (m *PollReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PollReply.Marshal(b, m, deterministic) -} -func (dst *PollReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_PollReply.Merge(dst, src) -} -func (m *PollReply) XXX_Size() int { - return xxx_messageInfo_PollReply.Size(m) -} -func (m *PollReply) XXX_DiscardUnknown() { - xxx_messageInfo_PollReply.DiscardUnknown(m) -} - -var xxx_messageInfo_PollReply proto.InternalMessageInfo - -func (m *PollReply) GetEvents() []*PollEvent { - if m != nil { - return m.Events - } - return nil -} - -type ResolveRequest struct { - Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` - AddressFamilies []CreateSocketRequest_SocketFamily `protobuf:"varint,2,rep,name=address_families,json=addressFamilies,enum=appengine.CreateSocketRequest_SocketFamily" json:"address_families,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ResolveRequest) Reset() { *m = ResolveRequest{} } -func (m *ResolveRequest) String() string { return proto.CompactTextString(m) } -func (*ResolveRequest) ProtoMessage() {} -func (*ResolveRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{32} -} -func (m *ResolveRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ResolveRequest.Unmarshal(m, b) -} -func (m *ResolveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ResolveRequest.Marshal(b, m, deterministic) -} -func (dst *ResolveRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResolveRequest.Merge(dst, src) -} -func (m *ResolveRequest) XXX_Size() int { - return xxx_messageInfo_ResolveRequest.Size(m) -} -func (m *ResolveRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ResolveRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ResolveRequest proto.InternalMessageInfo - -func (m *ResolveRequest) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *ResolveRequest) GetAddressFamilies() []CreateSocketRequest_SocketFamily { - if m != nil { - return m.AddressFamilies - } - return nil -} - -type ResolveReply struct { - PackedAddress [][]byte `protobuf:"bytes,2,rep,name=packed_address,json=packedAddress" json:"packed_address,omitempty"` - CanonicalName *string `protobuf:"bytes,3,opt,name=canonical_name,json=canonicalName" json:"canonical_name,omitempty"` - Aliases []string `protobuf:"bytes,4,rep,name=aliases" json:"aliases,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ResolveReply) Reset() { *m = ResolveReply{} } -func (m *ResolveReply) String() string { return proto.CompactTextString(m) } -func (*ResolveReply) ProtoMessage() {} -func 
(*ResolveReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{33} -} -func (m *ResolveReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ResolveReply.Unmarshal(m, b) -} -func (m *ResolveReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ResolveReply.Marshal(b, m, deterministic) -} -func (dst *ResolveReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResolveReply.Merge(dst, src) -} -func (m *ResolveReply) XXX_Size() int { - return xxx_messageInfo_ResolveReply.Size(m) -} -func (m *ResolveReply) XXX_DiscardUnknown() { - xxx_messageInfo_ResolveReply.DiscardUnknown(m) -} - -var xxx_messageInfo_ResolveReply proto.InternalMessageInfo - -func (m *ResolveReply) GetPackedAddress() [][]byte { - if m != nil { - return m.PackedAddress - } - return nil -} - -func (m *ResolveReply) GetCanonicalName() string { - if m != nil && m.CanonicalName != nil { - return *m.CanonicalName - } - return "" -} - -func (m *ResolveReply) GetAliases() []string { - if m != nil { - return m.Aliases - } - return nil -} - -func init() { - proto.RegisterType((*RemoteSocketServiceError)(nil), "appengine.RemoteSocketServiceError") - proto.RegisterType((*AddressPort)(nil), "appengine.AddressPort") - proto.RegisterType((*CreateSocketRequest)(nil), "appengine.CreateSocketRequest") - proto.RegisterType((*CreateSocketReply)(nil), "appengine.CreateSocketReply") - proto.RegisterType((*BindRequest)(nil), "appengine.BindRequest") - proto.RegisterType((*BindReply)(nil), "appengine.BindReply") - proto.RegisterType((*GetSocketNameRequest)(nil), "appengine.GetSocketNameRequest") - proto.RegisterType((*GetSocketNameReply)(nil), "appengine.GetSocketNameReply") - proto.RegisterType((*GetPeerNameRequest)(nil), "appengine.GetPeerNameRequest") - proto.RegisterType((*GetPeerNameReply)(nil), "appengine.GetPeerNameReply") - proto.RegisterType((*SocketOption)(nil), "appengine.SocketOption") - proto.RegisterType((*SetSocketOptionsRequest)(nil), "appengine.SetSocketOptionsRequest") - proto.RegisterType((*SetSocketOptionsReply)(nil), "appengine.SetSocketOptionsReply") - proto.RegisterType((*GetSocketOptionsRequest)(nil), "appengine.GetSocketOptionsRequest") - proto.RegisterType((*GetSocketOptionsReply)(nil), "appengine.GetSocketOptionsReply") - proto.RegisterType((*ConnectRequest)(nil), "appengine.ConnectRequest") - proto.RegisterType((*ConnectReply)(nil), "appengine.ConnectReply") - proto.RegisterType((*ListenRequest)(nil), "appengine.ListenRequest") - proto.RegisterType((*ListenReply)(nil), "appengine.ListenReply") - proto.RegisterType((*AcceptRequest)(nil), "appengine.AcceptRequest") - proto.RegisterType((*AcceptReply)(nil), "appengine.AcceptReply") - proto.RegisterType((*ShutDownRequest)(nil), "appengine.ShutDownRequest") - proto.RegisterType((*ShutDownReply)(nil), "appengine.ShutDownReply") - proto.RegisterType((*CloseRequest)(nil), "appengine.CloseRequest") - proto.RegisterType((*CloseReply)(nil), "appengine.CloseReply") - proto.RegisterType((*SendRequest)(nil), "appengine.SendRequest") - proto.RegisterType((*SendReply)(nil), "appengine.SendReply") - proto.RegisterType((*ReceiveRequest)(nil), "appengine.ReceiveRequest") - proto.RegisterType((*ReceiveReply)(nil), "appengine.ReceiveReply") - proto.RegisterType((*PollEvent)(nil), "appengine.PollEvent") - proto.RegisterType((*PollRequest)(nil), "appengine.PollRequest") - proto.RegisterType((*PollReply)(nil), "appengine.PollReply") - proto.RegisterType((*ResolveRequest)(nil), 
"appengine.ResolveRequest") - proto.RegisterType((*ResolveReply)(nil), "appengine.ResolveReply") -} - -func init() { - proto.RegisterFile("google.golang.org/appengine/internal/socket/socket_service.proto", fileDescriptor_socket_service_b5f8f233dc327808) -} - -var fileDescriptor_socket_service_b5f8f233dc327808 = []byte{ - // 3088 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0x5f, 0x77, 0xe3, 0xc6, - 0x75, 0x37, 0x48, 0xfd, 0xe3, 0x90, 0x94, 0xee, 0x62, 0xa5, 0x5d, 0x25, 0x6e, 0x12, 0x05, 0x8e, - 0x1b, 0x25, 0x8e, 0x77, 0x6d, 0x39, 0x4d, 0x9b, 0xa4, 0x49, 0x16, 0x04, 0x86, 0x24, 0x4c, 0x00, - 0x03, 0xcd, 0x0c, 0x25, 0xd1, 0x6d, 0x8a, 0xd0, 0x22, 0xa4, 0x65, 0x4c, 0x11, 0x0c, 0xc9, 0xdd, - 0xf5, 0xba, 0x69, 0xaa, 0xfe, 0x39, 0xfd, 0x12, 0x7d, 0xe8, 0x73, 0x3f, 0x43, 0x4f, 0x4f, 0x5f, - 0xfa, 0xec, 0xc7, 0x7e, 0x84, 0x9e, 0xbe, 0xb4, 0x9f, 0xa1, 0x67, 0x06, 0xe0, 0x60, 0xc8, 0xd5, - 0xae, 0x77, 0x75, 0x72, 0x4e, 0x9e, 0xa4, 0xfb, 0xbb, 0x77, 0xee, 0xff, 0x99, 0xb9, 0x03, 0xa2, - 0x47, 0x97, 0x69, 0x7a, 0x39, 0x4a, 0x1e, 0x5c, 0xa6, 0xa3, 0xfe, 0xf8, 0xf2, 0x41, 0x3a, 0xbd, - 0x7c, 0xd8, 0x9f, 0x4c, 0x92, 0xf1, 0xe5, 0x70, 0x9c, 0x3c, 0x1c, 0x8e, 0xe7, 0xc9, 0x74, 0xdc, - 0x1f, 0x3d, 0x9c, 0xa5, 0xe7, 0x9f, 0x25, 0xf3, 0xfc, 0x4f, 0x3c, 0x4b, 0xa6, 0x4f, 0x87, 0xe7, - 0xc9, 0x83, 0xc9, 0x34, 0x9d, 0xa7, 0x66, 0x45, 0xc9, 0x5b, 0xff, 0xbc, 0x8b, 0xf6, 0x69, 0x72, - 0x95, 0xce, 0x13, 0x26, 0x25, 0x59, 0x26, 0x88, 0xa7, 0xd3, 0x74, 0x6a, 0x7e, 0x07, 0xd5, 0x66, - 0xcf, 0x67, 0xf3, 0xe4, 0x2a, 0x4e, 0x04, 0xbd, 0x6f, 0x1c, 0x18, 0x87, 0xeb, 0x3f, 0x31, 0x3e, - 0xa0, 0xd5, 0x0c, 0xce, 0xa4, 0xbe, 0x8d, 0x6a, 0x92, 0x1d, 0x0f, 0x92, 0x79, 0x7f, 0x38, 0xda, - 0x2f, 0x1d, 0x18, 0x87, 0x15, 0x5a, 0x95, 0x98, 0x2b, 0x21, 0xeb, 0x73, 0x54, 0x91, 0xb2, 0x4e, - 0x3a, 0x48, 0x4c, 0x40, 0x35, 0xd6, 0x63, 0x1c, 0x07, 0x31, 0xa6, 0x94, 0x50, 0x30, 0xcc, 0x3a, - 0xaa, 0xb4, 0x6c, 0x2f, 0x27, 0x4b, 0x66, 0x15, 0x6d, 0x36, 0x6d, 0xcf, 0xef, 0x52, 0x0c, 0x6b, - 0xe6, 0x1e, 0xba, 0x13, 0x61, 0x1a, 0x78, 0x8c, 0x79, 0x24, 0x8c, 0x5d, 0x1c, 0x7a, 0xd8, 0x85, - 0x75, 0xf3, 0x2e, 0xda, 0xf1, 0xc2, 0x13, 0xdb, 0xf7, 0xdc, 0x98, 0xe2, 0xe3, 0x2e, 0x66, 0x1c, - 0x36, 0xcc, 0x3b, 0xa8, 0xce, 0x88, 0xd3, 0xc1, 0x3c, 0x76, 0x7c, 0xc2, 0xb0, 0x0b, 0x9b, 0xd6, - 0xbf, 0x99, 0xa8, 0xca, 0x34, 0x67, 0x77, 0x50, 0x95, 0xf5, 0x58, 0xcc, 0xba, 0x8e, 0x83, 0x19, - 0x83, 0xb7, 0x84, 0x6d, 0x01, 0x60, 0x61, 0x04, 0x0c, 0x73, 0x1b, 0x21, 0x49, 0x86, 0x04, 0x87, - 0x1c, 0x4a, 0x8a, 0xcd, 0xa8, 0xd3, 0x86, 0xb2, 0x22, 0xbd, 0x90, 0x53, 0x58, 0x13, 0x9e, 0x66, - 0x24, 0x81, 0x75, 0xc5, 0x0b, 0xcf, 0x3c, 0x02, 0x1b, 0x8a, 0x3c, 0x6a, 0x78, 0x2d, 0xd8, 0x5c, - 0x18, 0x16, 0x8a, 0xcf, 0xb0, 0x03, 0x5b, 0x8a, 0xdf, 0xb0, 0xdd, 0x26, 0x54, 0x94, 0x61, 0xa7, - 0xed, 0xf9, 0x2e, 0x20, 0x45, 0xdb, 0x2d, 0xdb, 0x0b, 0xa1, 0x2a, 0x02, 0x96, 0xf4, 0x29, 0xe9, - 0xfa, 0x6e, 0xc3, 0x27, 0x4e, 0x07, 0xaa, 0x9a, 0xb7, 0x01, 0x0e, 0xa0, 0x56, 0x2c, 0x12, 0xd1, - 0x41, 0x5d, 0xd1, 0x4d, 0xbb, 0xeb, 0x73, 0xd8, 0xd6, 0x9c, 0xe0, 0x0d, 0xbf, 0x03, 0x3b, 0x85, - 0x13, 0x5d, 0xd6, 0x03, 0x50, 0xf2, 0xf8, 0xcc, 0x63, 0x1c, 0xee, 0x28, 0xf6, 0x99, 0x8b, 0x4f, - 0xc0, 0xd4, 0xcc, 0x09, 0xfa, 0xae, 0xae, 0xce, 0xf5, 0x28, 0xec, 0x2a, 0x01, 0x8f, 0x09, 0x7a, - 0xaf, 0xa0, 0x45, 0xa9, 0xe0, 0x5e, 0xa1, 0xa0, 0xe9, 0xf9, 0x18, 0xee, 0x2b, 0x3a, 0x90, 0xf4, - 0xbe, 0x66, 0x80, 0xf3, 0x1e, 0x7c, 0x4d, 0x19, 0xe0, 0x67, 0xbc, 0xc1, 0x7a, 0xf0, 0x75, 0xe5, - 0x50, 0x53, 0x24, 0xf5, 0x6d, 0x4d, 0x9e, 
0x45, 0x0e, 0xfc, 0x91, 0xa2, 0x59, 0xe4, 0x45, 0x18, - 0xbe, 0xa1, 0xc4, 0x29, 0x69, 0x32, 0xf8, 0x66, 0x61, 0xce, 0xf7, 0xc2, 0x0e, 0x7c, 0xab, 0xa8, - 0xbd, 0x90, 0x3e, 0x30, 0x6b, 0x68, 0x4b, 0x92, 0x2e, 0x09, 0xe0, 0xdb, 0x4a, 0x98, 0xda, 0x61, - 0x0b, 0x83, 0xa5, 0x7c, 0x71, 0xb1, 0xed, 0xfa, 0x1d, 0x78, 0x47, 0x76, 0x9b, 0x02, 0x44, 0x3d, - 0xde, 0x31, 0x77, 0x11, 0x64, 0xfe, 0xd8, 0x01, 0xe6, 0x84, 0xf8, 0x24, 0x6c, 0xc1, 0x77, 0x34, - 0x2f, 0x7d, 0xa7, 0x03, 0xef, 0xea, 0x5e, 0xf7, 0x18, 0xfc, 0xb1, 0x52, 0x14, 0x12, 0x8e, 0x83, - 0x88, 0xf7, 0xe0, 0xbb, 0xca, 0x33, 0x9f, 0x90, 0x08, 0x0e, 0xf5, 0x3a, 0xb3, 0x16, 0x7c, 0xbf, - 0x68, 0x43, 0x97, 0x06, 0xf0, 0x9e, 0xd6, 0x3b, 0x34, 0x6c, 0xc1, 0x0f, 0xf2, 0x1d, 0x16, 0x63, - 0xff, 0x28, 0x64, 0xbd, 0xd0, 0x81, 0xf7, 0x95, 0x84, 0xff, 0x51, 0xdb, 0xe7, 0xf0, 0x40, 0xa3, - 0x29, 0xe3, 0xf0, 0xb0, 0xa0, 0x43, 0xa1, 0xe1, 0x03, 0x15, 0x6c, 0x37, 0xb4, 0xb9, 0xd3, 0x86, - 0x0f, 0x35, 0x0f, 0x1c, 0xe6, 0xc1, 0x51, 0xb1, 0xe0, 0x48, 0x28, 0xfc, 0x48, 0xef, 0x66, 0x0c, - 0x3f, 0xd4, 0x49, 0x0a, 0x7f, 0xa2, 0xa4, 0xcf, 0x9a, 0x5d, 0xdf, 0x87, 0x1f, 0x69, 0xda, 0xec, - 0x90, 0xc0, 0x9f, 0x2a, 0x73, 0x42, 0xfc, 0xd8, 0x81, 0x3f, 0xd3, 0x01, 0xe6, 0x73, 0xf8, 0xb1, - 0x5a, 0xd1, 0x68, 0x92, 0x90, 0xc3, 0x4f, 0xf5, 0x1c, 0x72, 0x0a, 0x7f, 0xae, 0xb5, 0xa2, 0x6b, - 0x73, 0x1b, 0x7e, 0xa6, 0x3c, 0xe0, 0x5e, 0x80, 0xe1, 0xe7, 0xc5, 0xe6, 0x24, 0x8c, 0xc2, 0x2f, - 0xb4, 0xe5, 0x21, 0xe6, 0xf0, 0x48, 0xa3, 0xa3, 0x4e, 0x0b, 0x6c, 0xa5, 0x8e, 0xe2, 0x80, 0x70, - 0x0c, 0x0d, 0x4d, 0xbf, 0xec, 0x1d, 0x47, 0x35, 0x8b, 0xed, 0x9e, 0x80, 0x5b, 0x34, 0x1e, 0x0d, - 0x42, 0x0e, 0x58, 0x99, 0x73, 0x48, 0x10, 0x40, 0x53, 0xb1, 0x23, 0x4a, 0x38, 0x81, 0x96, 0xaa, - 0x78, 0xd0, 0xf5, 0xb9, 0xd7, 0x26, 0x11, 0xb4, 0x8b, 0xf6, 0x22, 0xdc, 0x25, 0x1c, 0x3c, 0x3d, - 0x05, 0xa2, 0xe8, 0x1f, 0xab, 0x45, 0xe4, 0x04, 0xd3, 0xa6, 0x4f, 0x4e, 0xa1, 0xa3, 0x0a, 0x1d, - 0x12, 0xde, 0x0d, 0xbd, 0x63, 0xf0, 0x8b, 0x3c, 0xd9, 0x6e, 0xd3, 0x85, 0x40, 0x0f, 0xc4, 0x69, - 0xb7, 0x20, 0x54, 0x80, 0xef, 0x35, 0x6c, 0xc7, 0x01, 0xa2, 0x03, 0x0d, 0xdb, 0x85, 0x48, 0x07, - 0x98, 0x13, 0xc2, 0xb1, 0x0e, 0x04, 0xf6, 0x19, 0xd0, 0xa2, 0xbf, 0xbc, 0x86, 0x3c, 0xcc, 0x58, - 0xb1, 0xd1, 0x7d, 0x86, 0x8f, 0x81, 0x2b, 0x09, 0x8a, 0x19, 0xb7, 0x29, 0x87, 0xae, 0x42, 0x18, - 0xa7, 0x72, 0xbb, 0x9d, 0xa8, 0x35, 0x5d, 0x86, 0x29, 0x83, 0x53, 0x3d, 0x18, 0x71, 0x8a, 0xc3, - 0x99, 0xda, 0x4e, 0xae, 0xd0, 0xe2, 0xba, 0x94, 0xe2, 0x63, 0xe8, 0x29, 0xb9, 0x80, 0xb5, 0x98, - 0xf7, 0x09, 0x86, 0x4f, 0x4c, 0x13, 0x6d, 0x17, 0xe9, 0xe5, 0xbd, 0x08, 0xc3, 0x5f, 0xa8, 0xf3, - 0x32, 0x24, 0x12, 0x25, 0x11, 0x87, 0xbf, 0x34, 0xef, 0xa3, 0xbb, 0x85, 0x60, 0x48, 0x58, 0x37, - 0x8a, 0x08, 0xe5, 0xf0, 0x4b, 0xc5, 0x10, 0x86, 0x79, 0xc1, 0xf8, 0x2b, 0xa5, 0x9a, 0x44, 0xc2, - 0xad, 0x6e, 0x14, 0x41, 0xac, 0x1f, 0x7b, 0xac, 0x2b, 0x80, 0x85, 0x9f, 0x51, 0xb3, 0x58, 0xfa, - 0x2b, 0x85, 0xda, 0x1a, 0xda, 0x57, 0x0a, 0x45, 0x3c, 0x5e, 0xd8, 0x65, 0x18, 0x3e, 0x15, 0x77, - 0x9c, 0xc2, 0x42, 0xc2, 0xed, 0x13, 0xdb, 0xf3, 0xe1, 0xbc, 0x48, 0x08, 0xe6, 0x2e, 0x39, 0x0d, - 0x61, 0x50, 0x04, 0x85, 0x79, 0x37, 0xa4, 0xd8, 0x76, 0xda, 0x90, 0x14, 0xc7, 0x07, 0xe6, 0x14, - 0x33, 0xcc, 0xe1, 0x42, 0x99, 0x76, 0x48, 0x18, 0xda, 0x0d, 0x42, 0x39, 0x76, 0xe1, 0x52, 0x99, - 0x16, 0x68, 0x26, 0xf9, 0x58, 0x8b, 0xa5, 0xd1, 0x6d, 0x32, 0x18, 0x2a, 0xc0, 0x63, 0x42, 0x0c, - 0x7e, 0xad, 0x97, 0x45, 0x22, 0x9f, 0x29, 0x83, 0xac, 0xdd, 0xcd, 0x1c, 0x1b, 0x29, 0x83, 0x9c, - 0x90, 0xc0, 0x0e, 0x7b, 0x14, 0x37, 0x19, 0x5c, 0x29, 0x41, 0xb1, 
0x07, 0x5d, 0xd2, 0xe5, 0x30, - 0x5e, 0xf2, 0x8c, 0xe2, 0x66, 0x57, 0xdc, 0xd2, 0xa9, 0x12, 0x6c, 0x13, 0x96, 0x69, 0x9c, 0x28, - 0x41, 0x01, 0x2d, 0x62, 0xfd, 0x8d, 0x72, 0xc6, 0xf6, 0x29, 0xb6, 0xdd, 0x1e, 0x4c, 0x55, 0x4a, - 0xbc, 0x30, 0xa2, 0xa4, 0x45, 0xc5, 0xa5, 0x3e, 0x2b, 0xb6, 0x23, 0xb7, 0x7d, 0x0c, 0xf3, 0xe2, - 0x38, 0x73, 0x7c, 0x6c, 0x87, 0xf0, 0x44, 0x2f, 0x61, 0x68, 0x07, 0xf0, 0xb4, 0x00, 0xb2, 0xe4, - 0x3f, 0xd3, 0xae, 0x32, 0x21, 0xf0, 0xb9, 0x72, 0x31, 0x3b, 0x11, 0x3c, 0x02, 0xcf, 0x95, 0x88, - 0x7b, 0xdc, 0x25, 0x1c, 0xbe, 0xd0, 0xce, 0xf1, 0x00, 0xbb, 0x5e, 0x37, 0x80, 0xbf, 0x56, 0xde, - 0x65, 0x80, 0x6c, 0xcd, 0xdf, 0x2a, 0x39, 0xc7, 0x0e, 0x1d, 0xec, 0x63, 0x17, 0xfe, 0x46, 0x3b, - 0x7f, 0x3a, 0xb8, 0x07, 0xbf, 0x53, 0xeb, 0x3a, 0xb8, 0x87, 0xcf, 0x22, 0x8f, 0x62, 0x17, 0xfe, - 0xd6, 0xdc, 0x2d, 0x40, 0x8a, 0x4f, 0x48, 0x07, 0xbb, 0x70, 0x6d, 0x98, 0x7b, 0x79, 0xa2, 0x24, - 0xfa, 0x31, 0x76, 0x44, 0xad, 0xff, 0xce, 0x30, 0xef, 0x2e, 0x1a, 0xf7, 0x34, 0xc4, 0x54, 0x5c, - 0x51, 0xf0, 0xf7, 0x86, 0xb9, 0x9f, 0xb7, 0x79, 0x48, 0x38, 0xc5, 0x8e, 0x38, 0x48, 0xec, 0x86, - 0x8f, 0xe1, 0x1f, 0x0c, 0x13, 0x16, 0xe7, 0x44, 0xb3, 0xe3, 0xf9, 0x3e, 0xfc, 0xa3, 0xf1, 0xf5, - 0x12, 0x18, 0xd6, 0x15, 0xaa, 0xda, 0x83, 0xc1, 0x34, 0x99, 0xcd, 0xa2, 0x74, 0x3a, 0x37, 0x4d, - 0xb4, 0x36, 0x49, 0xa7, 0xf3, 0x7d, 0xe3, 0xa0, 0x74, 0xb8, 0x4e, 0xe5, 0xff, 0xe6, 0xbb, 0x68, - 0x7b, 0xd2, 0x3f, 0xff, 0x2c, 0x19, 0xc4, 0xfd, 0x4c, 0x52, 0xce, 0x7f, 0x35, 0x5a, 0xcf, 0xd0, - 0x7c, 0xb9, 0xf9, 0x0e, 0xaa, 0x3f, 0x4e, 0x67, 0xf3, 0x71, 0xff, 0x2a, 0x89, 0x1f, 0x0f, 0xc7, - 0xf3, 0xfd, 0xb2, 0x9c, 0x12, 0x6b, 0x0b, 0xb0, 0x3d, 0x1c, 0xcf, 0xad, 0x7f, 0x5a, 0x43, 0x77, - 0x9d, 0x69, 0xd2, 0x5f, 0x0c, 0xa3, 0x34, 0xf9, 0xcd, 0x93, 0x64, 0x36, 0x37, 0x1d, 0xb4, 0x71, - 0xd1, 0xbf, 0x1a, 0x8e, 0x9e, 0x4b, 0xcb, 0xdb, 0x47, 0xef, 0x3d, 0x50, 0x03, 0xec, 0x83, 0x1b, - 0xe4, 0x1f, 0x64, 0x54, 0x53, 0x2e, 0xa1, 0xf9, 0x52, 0xd3, 0x43, 0x5b, 0x72, 0xfa, 0x3d, 0x4f, - 0xc5, 0x88, 0x2a, 0xd4, 0xbc, 0xff, 0x5a, 0x6a, 0xa2, 0x7c, 0x11, 0x55, 0xcb, 0xcd, 0x9f, 0xa3, - 0xed, 0x7c, 0xae, 0x4e, 0x27, 0xf3, 0x61, 0x3a, 0x9e, 0xed, 0x97, 0x0f, 0xca, 0x87, 0xd5, 0xa3, - 0xfb, 0x9a, 0xc2, 0x6c, 0x31, 0x91, 0x7c, 0x5a, 0x9f, 0x69, 0xd4, 0xcc, 0x6c, 0xa0, 0x3b, 0x93, - 0x69, 0xfa, 0xf9, 0xf3, 0x38, 0xf9, 0x3c, 0x9b, 0xd6, 0xe3, 0xe1, 0x64, 0x7f, 0xed, 0xc0, 0x38, - 0xac, 0x1e, 0xdd, 0xd3, 0x54, 0x68, 0xa9, 0xa7, 0x3b, 0x72, 0x01, 0xce, 0xe5, 0xbd, 0x89, 0x79, - 0x88, 0xb6, 0x47, 0xc3, 0xd9, 0x3c, 0x19, 0xc7, 0x9f, 0xf6, 0xcf, 0x3f, 0x1b, 0xa5, 0x97, 0xfb, - 0xeb, 0x8b, 0xe9, 0xbc, 0x9e, 0x31, 0x1a, 0x19, 0x6e, 0x7e, 0x84, 0x2a, 0x53, 0x39, 0xe1, 0x0b, - 0x2b, 0x1b, 0xaf, 0xb4, 0xb2, 0x95, 0x09, 0x7a, 0x13, 0x73, 0x0f, 0x6d, 0xf4, 0x27, 0x93, 0x78, - 0x38, 0xd8, 0xaf, 0xc8, 0x42, 0xad, 0xf7, 0x27, 0x13, 0x6f, 0x60, 0x7e, 0x03, 0xa1, 0xc9, 0x34, - 0xfd, 0x75, 0x72, 0x3e, 0x17, 0x2c, 0x74, 0x60, 0x1c, 0x96, 0x69, 0x25, 0x47, 0xbc, 0x81, 0x65, - 0xa1, 0x9a, 0x9e, 0x7b, 0x73, 0x0b, 0xad, 0x79, 0xd1, 0xd3, 0x1f, 0x82, 0x91, 0xff, 0xf7, 0x23, - 0x28, 0x59, 0x16, 0xda, 0x5e, 0x4e, 0xac, 0xb9, 0x89, 0xca, 0xdc, 0x89, 0xc0, 0x10, 0xff, 0x74, - 0xdd, 0x08, 0x4a, 0xd6, 0x97, 0x06, 0xba, 0xb3, 0x5c, 0x91, 0xc9, 0xe8, 0xb9, 0xf9, 0x1e, 0xba, - 0x93, 0xa7, 0x7d, 0x90, 0xcc, 0xce, 0xa7, 0xc3, 0xc9, 0x3c, 0x7f, 0x93, 0x54, 0x28, 0x64, 0x0c, - 0x57, 0xe1, 0xe6, 0xcf, 0xd0, 0xb6, 0x78, 0xf4, 0x24, 0x53, 0xd5, 0x97, 0xe5, 0x57, 0x86, 0x5e, - 0xcf, 0xa4, 0x17, 0xfd, 0xfa, 0x7b, 0x28, 0xd1, 0xf7, 0x2b, 0x5b, 0xff, 0xb3, 0x09, 0xd7, 
0xd7, - 0xd7, 0xd7, 0x25, 0xeb, 0x77, 0xa8, 0xda, 0x18, 0x8e, 0x07, 0x8b, 0x86, 0x7e, 0x49, 0x24, 0xa5, - 0x1b, 0x23, 0xb9, 0xd1, 0x15, 0xd1, 0xc1, 0xaf, 0xef, 0x8a, 0x45, 0x50, 0x25, 0xb3, 0x2f, 0xf2, - 0x78, 0xa3, 0x42, 0xe3, 0x8d, 0x62, 0xb3, 0x1c, 0xb4, 0xdb, 0x4a, 0xe6, 0x59, 0x75, 0xc2, 0xfe, - 0x55, 0x72, 0x9b, 0xc8, 0xac, 0x33, 0x64, 0xae, 0x28, 0x79, 0xa9, 0x7b, 0xa5, 0x37, 0x73, 0xcf, - 0x96, 0x9a, 0xa3, 0x24, 0x99, 0xde, 0xda, 0x39, 0x07, 0xc1, 0x92, 0x0a, 0xe1, 0xda, 0x43, 0xb4, - 0x39, 0x49, 0x92, 0xe9, 0x57, 0x3b, 0xb4, 0x21, 0xc4, 0xbc, 0x89, 0xf5, 0xe5, 0xe6, 0x62, 0x47, - 0x64, 0x7b, 0xdf, 0xfc, 0x05, 0x5a, 0x1f, 0x25, 0x4f, 0x93, 0x51, 0x7e, 0x92, 0x7d, 0xef, 0x25, - 0x27, 0xc6, 0x12, 0xe1, 0x8b, 0x05, 0x34, 0x5b, 0x67, 0x3e, 0x42, 0x1b, 0xd9, 0xa1, 0x93, 0x1f, - 0x62, 0x87, 0xaf, 0xa3, 0x41, 0x46, 0x90, 0xaf, 0x33, 0x77, 0xd1, 0xfa, 0xd3, 0xfe, 0xe8, 0x49, - 0xb2, 0x5f, 0x3e, 0x28, 0x1d, 0xd6, 0x68, 0x46, 0x58, 0x09, 0xba, 0xf3, 0x82, 0x4d, 0xed, 0x41, - 0xcd, 0x88, 0x1f, 0x7b, 0x11, 0xbc, 0x25, 0x67, 0x95, 0x02, 0xca, 0xfe, 0x05, 0x43, 0xce, 0x16, - 0x05, 0x2c, 0xb6, 0xf3, 0xc6, 0x0a, 0x26, 0x76, 0xf6, 0x1d, 0xeb, 0xdf, 0xd7, 0x11, 0xac, 0x7a, - 0x26, 0x6f, 0xbb, 0x85, 0x60, 0xec, 0xe2, 0x46, 0xb7, 0x05, 0x86, 0x1c, 0xc9, 0x14, 0x48, 0xc5, - 0x94, 0x28, 0xc6, 0x23, 0x28, 0x2d, 0xa9, 0x8d, 0xe5, 0x95, 0x5a, 0x5e, 0xd6, 0x90, 0x7d, 0x47, - 0x58, 0x5b, 0xd6, 0xe0, 0x92, 0x90, 0x53, 0xd2, 0xe5, 0x18, 0xd6, 0x97, 0x19, 0x0d, 0x4a, 0x6c, - 0xd7, 0xb1, 0xe5, 0x07, 0x04, 0x31, 0x74, 0x28, 0x06, 0x0b, 0xdd, 0x46, 0xb7, 0x09, 0x9b, 0xcb, - 0x28, 0x75, 0x4e, 0x04, 0xba, 0xb5, 0xac, 0xa4, 0x83, 0x71, 0x64, 0xfb, 0xde, 0x09, 0x86, 0xca, - 0x32, 0x83, 0x90, 0x86, 0x17, 0xfa, 0x5e, 0x88, 0x01, 0x2d, 0xeb, 0xf1, 0xbd, 0xb0, 0x85, 0x29, - 0xd4, 0xcd, 0x7b, 0xc8, 0x5c, 0xd2, 0x2e, 0x86, 0x25, 0x02, 0xbb, 0xcb, 0x38, 0x0b, 0xdd, 0x0c, - 0xdf, 0xd3, 0x6a, 0xe2, 0x45, 0x31, 0x27, 0x0c, 0x8c, 0x15, 0x88, 0xfb, 0x50, 0xd2, 0xca, 0xe4, - 0x45, 0x71, 0x5b, 0x8c, 0x9a, 0x8e, 0x0f, 0xe5, 0x65, 0x98, 0x44, 0xdc, 0x23, 0x21, 0x83, 0x35, - 0xcd, 0x16, 0x77, 0xa2, 0x58, 0x3c, 0xef, 0x7d, 0xbb, 0x07, 0x86, 0x26, 0x2e, 0xf0, 0xc0, 0x3e, - 0x63, 0xb8, 0x05, 0x25, 0x2d, 0xdb, 0x02, 0x76, 0x08, 0xed, 0x40, 0x59, 0x0b, 0x5b, 0x80, 0x22, - 0x21, 0x9e, 0xeb, 0x63, 0x58, 0x33, 0xf7, 0xd1, 0xee, 0x2a, 0x23, 0xe4, 0x27, 0x3e, 0xac, 0xaf, - 0x98, 0x15, 0x1c, 0x27, 0x14, 0x65, 0x58, 0x36, 0x2b, 0x9e, 0xb0, 0x21, 0x87, 0xcd, 0x15, 0xf1, - 0x2c, 0x81, 0x47, 0xb0, 0x65, 0xbe, 0x8d, 0xee, 0x6b, 0xb8, 0x8b, 0x9b, 0x98, 0xc6, 0xb6, 0xe3, - 0xe0, 0x88, 0x43, 0x65, 0x85, 0x79, 0xea, 0x85, 0x2e, 0x39, 0x8d, 0x1d, 0xdf, 0x0e, 0x22, 0x40, - 0x2b, 0x81, 0x78, 0x61, 0x93, 0x40, 0x75, 0x25, 0x90, 0xe3, 0xae, 0xe7, 0x74, 0x6c, 0xa7, 0x03, - 0x35, 0x39, 0x11, 0x3d, 0x47, 0xf7, 0xd9, 0xe2, 0xc8, 0xca, 0xaf, 0xf3, 0x5b, 0x1d, 0xea, 0x1f, - 0xa2, 0xcd, 0xc5, 0xec, 0x50, 0x7a, 0xf5, 0xec, 0xb0, 0x90, 0xb3, 0xee, 0xa3, 0xbd, 0x17, 0x4d, - 0x4f, 0x46, 0xcf, 0x85, 0x4f, 0xad, 0x3f, 0x90, 0x4f, 0x1f, 0xa3, 0xbd, 0xd6, 0x4d, 0x3e, 0xdd, - 0x46, 0xd7, 0xbf, 0x18, 0x68, 0xdb, 0x49, 0xc7, 0xe3, 0xe4, 0x7c, 0x7e, 0x2b, 0xf7, 0x97, 0xe6, - 0x9c, 0x57, 0xdf, 0x8f, 0xc5, 0x9c, 0xf3, 0x1e, 0xda, 0x99, 0x0f, 0xaf, 0x92, 0xf4, 0xc9, 0x3c, - 0x9e, 0x25, 0xe7, 0xe9, 0x78, 0x90, 0xcd, 0x09, 0xc6, 0x4f, 0x4a, 0xef, 0x7f, 0x48, 0xb7, 0x73, - 0x16, 0xcb, 0x38, 0xd6, 0x2f, 0x51, 0x4d, 0x39, 0xf8, 0x7b, 0xba, 0x48, 0xf5, 0x21, 0xe1, 0x04, - 0xd5, 0x7d, 0x39, 0xb9, 0xdd, 0x2a, 0xfc, 0x7d, 0xb4, 0xb9, 0x98, 0x04, 0x4b, 0x72, 0x3e, 0x5f, - 0x90, 0x56, 0x1d, 
0x55, 0x17, 0x7a, 0x45, 0xbb, 0x0c, 0x51, 0xdd, 0x3e, 0x3f, 0x4f, 0x26, 0xb7, - 0xcb, 0xf2, 0x0d, 0x09, 0x2b, 0xbd, 0x34, 0x61, 0xd7, 0x06, 0xaa, 0x2e, 0x6c, 0x89, 0x84, 0x1d, - 0xa1, 0xbd, 0x71, 0xf2, 0x2c, 0x7e, 0xd1, 0x5a, 0xf6, 0x66, 0xb8, 0x3b, 0x4e, 0x9e, 0xb1, 0x1b, - 0x06, 0xb9, 0xbc, 0xac, 0xaf, 0x39, 0xc8, 0x65, 0xd2, 0x39, 0x64, 0xfd, 0x97, 0x81, 0x76, 0xd8, - 0xe3, 0x27, 0x73, 0x37, 0x7d, 0x76, 0xbb, 0xbc, 0x7e, 0x80, 0xca, 0x8f, 0xd3, 0x67, 0xf9, 0x6d, - 0xfb, 0x4d, 0xbd, 0x8b, 0x97, 0xb5, 0x3e, 0x68, 0xa7, 0xcf, 0xa8, 0x10, 0x35, 0xbf, 0x85, 0xaa, - 0xb3, 0x64, 0x3c, 0x88, 0xd3, 0x8b, 0x8b, 0x59, 0x32, 0x97, 0xd7, 0x6c, 0x99, 0x22, 0x01, 0x11, - 0x89, 0x58, 0x0e, 0x2a, 0xb7, 0xd3, 0x67, 0xfa, 0x45, 0xd6, 0xee, 0xf2, 0x98, 0xba, 0xcb, 0xf7, - 0xa8, 0xc0, 0x4e, 0xc5, 0x85, 0xa7, 0xdd, 0x1b, 0x99, 0xdc, 0x29, 0x85, 0xb2, 0xb5, 0x83, 0xea, - 0x85, 0x07, 0xa2, 0xae, 0xbf, 0x42, 0x35, 0x67, 0x94, 0xce, 0x6e, 0x35, 0xed, 0x98, 0xef, 0x2c, - 0xfb, 0x2c, 0xea, 0x51, 0x96, 0x25, 0xd5, 0xfd, 0xae, 0x21, 0x94, 0x5b, 0x10, 0xf6, 0xfe, 0xcf, - 0x40, 0x55, 0x96, 0xdc, 0x72, 0xa8, 0xbd, 0x87, 0xd6, 0x06, 0xfd, 0x79, 0x5f, 0xa6, 0xb5, 0xd6, - 0x28, 0x6d, 0x19, 0x54, 0xd2, 0xe2, 0x9d, 0x38, 0x9b, 0x4f, 0x93, 0xfe, 0xd5, 0x72, 0xf6, 0x6a, - 0x19, 0x98, 0xf9, 0x61, 0xde, 0x47, 0xeb, 0x17, 0xa3, 0xfe, 0xe5, 0x4c, 0x0e, 0xe4, 0xf2, 0xc9, - 0x93, 0xd1, 0x62, 0x3e, 0x93, 0x51, 0xcc, 0x53, 0xf9, 0x1a, 0x7a, 0xc5, 0x7c, 0x26, 0xc4, 0x78, - 0x7a, 0x53, 0x37, 0x6f, 0xbc, 0xb4, 0x9b, 0x0f, 0x51, 0x25, 0x8b, 0x57, 0xb4, 0xf2, 0xdb, 0xa8, - 0x22, 0x1c, 0x8e, 0x67, 0xc9, 0x78, 0x9e, 0xfd, 0x30, 0x42, 0xb7, 0x04, 0xc0, 0x92, 0xf1, 0xdc, - 0xfa, 0x4f, 0x03, 0x6d, 0xd3, 0xe4, 0x3c, 0x19, 0x3e, 0xbd, 0x5d, 0x35, 0x94, 0xf2, 0xe1, 0x17, - 0x49, 0xbe, 0x9b, 0x33, 0xe5, 0xc3, 0x2f, 0x92, 0x22, 0xfa, 0xf2, 0x4a, 0xf4, 0x37, 0x04, 0xb3, - 0xfe, 0xd2, 0x60, 0x2c, 0xb4, 0xde, 0x94, 0xab, 0xaa, 0x68, 0x33, 0x60, 0x2d, 0x31, 0xa8, 0x80, - 0x61, 0xd6, 0xd0, 0x96, 0x20, 0x22, 0x8c, 0x3b, 0x50, 0xb2, 0xfe, 0xd5, 0x40, 0x35, 0x15, 0x86, - 0x08, 0xfa, 0x85, 0xea, 0xc8, 0x3e, 0x59, 0xa9, 0xce, 0xa2, 0xb4, 0xc2, 0x3d, 0xbd, 0xb4, 0x3f, - 0x45, 0xf5, 0x69, 0xa6, 0x6c, 0x10, 0x5f, 0x4c, 0xd3, 0xab, 0xaf, 0x78, 0x4e, 0xd5, 0x16, 0xc2, - 0xcd, 0x69, 0x7a, 0x25, 0xf6, 0xd4, 0xa7, 0x4f, 0x2e, 0x2e, 0x92, 0x69, 0x96, 0x13, 0xf9, 0xd6, - 0xa5, 0x28, 0x83, 0x44, 0x56, 0xac, 0x2f, 0xcb, 0xa8, 0x12, 0xa5, 0xa3, 0x11, 0x7e, 0x9a, 0x8c, - 0xdf, 0x30, 0xdb, 0xdf, 0x43, 0x30, 0xcd, 0xaa, 0x94, 0x0c, 0xe2, 0x44, 0xac, 0x9f, 0xe5, 0x49, - 0xdf, 0x51, 0xb8, 0x54, 0x3b, 0x33, 0xbf, 0x8b, 0x76, 0xd2, 0x4f, 0xe5, 0x4b, 0x51, 0x49, 0x96, - 0xa5, 0xe4, 0xf6, 0x02, 0xce, 0x04, 0xad, 0xff, 0x28, 0xa1, 0xba, 0x72, 0x47, 0x24, 0x5a, 0x9b, - 0x35, 0x22, 0xe2, 0xfb, 0x21, 0x09, 0x31, 0xbc, 0xa5, 0x4d, 0x6e, 0x02, 0xf4, 0xc2, 0xa5, 0x13, - 0x40, 0x40, 0x11, 0xf5, 0x96, 0x46, 0x5e, 0x81, 0x91, 0x2e, 0x87, 0xb5, 0x15, 0x0c, 0x53, 0x0a, - 0x5b, 0x2b, 0x58, 0xbb, 0x1b, 0x01, 0xac, 0xda, 0x3d, 0xb1, 0x7d, 0x38, 0xd0, 0x26, 0x2c, 0x01, - 0x52, 0x37, 0x24, 0x34, 0x80, 0x47, 0xe6, 0xbd, 0x15, 0xb8, 0x61, 0x87, 0xf2, 0x1b, 0xd3, 0x32, - 0x7e, 0x4a, 0xa5, 0xf8, 0x75, 0xe9, 0x05, 0x3c, 0x93, 0x5f, 0x93, 0x1f, 0x9f, 0x0a, 0x3c, 0x60, - 0x2d, 0xb8, 0xde, 0x5a, 0x55, 0x8e, 0x03, 0x72, 0x82, 0xe1, 0xfa, 0x40, 0x7e, 0xc0, 0xd2, 0x8d, - 0x0a, 0xb7, 0xaf, 0x1f, 0x59, 0x8f, 0x51, 0x55, 0x24, 0x70, 0xb1, 0x7f, 0x7e, 0x80, 0x36, 0xf2, - 0x84, 0x1b, 0x72, 0x9e, 0xd8, 0xd5, 0xda, 0x46, 0x25, 0x9a, 0xe6, 0x32, 0x6f, 0x76, 0x4b, 0xfd, - 0x38, 0xeb, 0x9c, 0xac, 0xc5, 0x0b, 0x3b, 
0xa5, 0xaf, 0xb6, 0x63, 0xfd, 0x56, 0xec, 0xf3, 0x59, - 0x3a, 0x2a, 0xf6, 0xb9, 0x89, 0xd6, 0xc6, 0xfd, 0xab, 0x24, 0x6f, 0x36, 0xf9, 0xbf, 0x79, 0x82, - 0x20, 0xbf, 0xbb, 0x62, 0xf9, 0x31, 0x6a, 0x98, 0x64, 0xda, 0xdf, 0xf0, 0x4b, 0xd6, 0x4e, 0xae, - 0xa4, 0x99, 0xeb, 0xb0, 0xfe, 0xbb, 0x2c, 0xf6, 0x67, 0x6e, 0x5e, 0x38, 0x7f, 0xd3, 0xc7, 0xb8, - 0xf2, 0x8b, 0x1f, 0xe3, 0xde, 0x45, 0xdb, 0xe7, 0xfd, 0x71, 0x3a, 0x1e, 0x9e, 0xf7, 0x47, 0xb1, - 0xf4, 0x36, 0xfb, 0x1a, 0x57, 0x57, 0xa8, 0x7c, 0x96, 0xed, 0xa3, 0xcd, 0xfe, 0x68, 0xd8, 0x9f, - 0x25, 0xe2, 0xa0, 0x2d, 0x1f, 0x56, 0xe8, 0x82, 0xb4, 0xfe, 0xb7, 0xa4, 0xff, 0xa0, 0xfb, 0x35, - 0xb4, 0x97, 0x17, 0x10, 0xdb, 0x5e, 0x2c, 0x5e, 0x69, 0x4d, 0x3b, 0xf0, 0x7c, 0xf1, 0x80, 0x28, - 0xae, 0x2e, 0xc9, 0x92, 0xbf, 0x65, 0x96, 0xb4, 0x09, 0x5b, 0xa0, 0x0d, 0xdb, 0x6d, 0xfa, 0x76, - 0x8b, 0x2d, 0x3d, 0xe3, 0x04, 0xa3, 0x69, 0x7b, 0x7e, 0xf6, 0x0b, 0xf0, 0x12, 0x28, 0x55, 0xaf, - 0xaf, 0xc0, 0x01, 0x0e, 0x08, 0xed, 0x2d, 0xbd, 0x1d, 0x04, 0x9c, 0xff, 0x1c, 0xb4, 0xf9, 0x02, - 0x1c, 0xda, 0x01, 0x86, 0x2d, 0xed, 0x49, 0x21, 0x60, 0x86, 0xe9, 0x89, 0xe7, 0x2c, 0xbf, 0xe1, - 0x24, 0x4e, 0x9c, 0x8e, 0x7c, 0x68, 0xa2, 0x15, 0x3d, 0xd9, 0xef, 0xd8, 0x4b, 0x6f, 0x86, 0x3c, - 0xa2, 0xb6, 0x17, 0x72, 0x06, 0xb5, 0x15, 0x86, 0xfc, 0xdd, 0xc1, 0x21, 0x3e, 0xd4, 0x57, 0x18, - 0xea, 0x37, 0x9d, 0x6d, 0x6d, 0x0f, 0xcb, 0xb8, 0xec, 0x33, 0xd8, 0x69, 0x6c, 0x7d, 0xb2, 0x91, - 0x9d, 0x5a, 0xff, 0x1f, 0x00, 0x00, 0xff, 0xff, 0x31, 0x03, 0x4e, 0xbd, 0xfd, 0x1f, 0x00, 0x00, -} diff --git a/vendor/google.golang.org/appengine/internal/socket/socket_service.proto b/vendor/google.golang.org/appengine/internal/socket/socket_service.proto deleted file mode 100644 index 2fcc7953dc0a1..0000000000000 --- a/vendor/google.golang.org/appengine/internal/socket/socket_service.proto +++ /dev/null @@ -1,460 +0,0 @@ -syntax = "proto2"; -option go_package = "socket"; - -package appengine; - -message RemoteSocketServiceError { - enum ErrorCode { - SYSTEM_ERROR = 1; - GAI_ERROR = 2; - FAILURE = 4; - PERMISSION_DENIED = 5; - INVALID_REQUEST = 6; - SOCKET_CLOSED = 7; - } - - enum SystemError { - option allow_alias = true; - - SYS_SUCCESS = 0; - SYS_EPERM = 1; - SYS_ENOENT = 2; - SYS_ESRCH = 3; - SYS_EINTR = 4; - SYS_EIO = 5; - SYS_ENXIO = 6; - SYS_E2BIG = 7; - SYS_ENOEXEC = 8; - SYS_EBADF = 9; - SYS_ECHILD = 10; - SYS_EAGAIN = 11; - SYS_EWOULDBLOCK = 11; - SYS_ENOMEM = 12; - SYS_EACCES = 13; - SYS_EFAULT = 14; - SYS_ENOTBLK = 15; - SYS_EBUSY = 16; - SYS_EEXIST = 17; - SYS_EXDEV = 18; - SYS_ENODEV = 19; - SYS_ENOTDIR = 20; - SYS_EISDIR = 21; - SYS_EINVAL = 22; - SYS_ENFILE = 23; - SYS_EMFILE = 24; - SYS_ENOTTY = 25; - SYS_ETXTBSY = 26; - SYS_EFBIG = 27; - SYS_ENOSPC = 28; - SYS_ESPIPE = 29; - SYS_EROFS = 30; - SYS_EMLINK = 31; - SYS_EPIPE = 32; - SYS_EDOM = 33; - SYS_ERANGE = 34; - SYS_EDEADLK = 35; - SYS_EDEADLOCK = 35; - SYS_ENAMETOOLONG = 36; - SYS_ENOLCK = 37; - SYS_ENOSYS = 38; - SYS_ENOTEMPTY = 39; - SYS_ELOOP = 40; - SYS_ENOMSG = 42; - SYS_EIDRM = 43; - SYS_ECHRNG = 44; - SYS_EL2NSYNC = 45; - SYS_EL3HLT = 46; - SYS_EL3RST = 47; - SYS_ELNRNG = 48; - SYS_EUNATCH = 49; - SYS_ENOCSI = 50; - SYS_EL2HLT = 51; - SYS_EBADE = 52; - SYS_EBADR = 53; - SYS_EXFULL = 54; - SYS_ENOANO = 55; - SYS_EBADRQC = 56; - SYS_EBADSLT = 57; - SYS_EBFONT = 59; - SYS_ENOSTR = 60; - SYS_ENODATA = 61; - SYS_ETIME = 62; - SYS_ENOSR = 63; - SYS_ENONET = 64; - SYS_ENOPKG = 65; - SYS_EREMOTE = 66; - SYS_ENOLINK = 67; - SYS_EADV = 68; - SYS_ESRMNT = 69; - SYS_ECOMM = 70; - SYS_EPROTO = 71; - SYS_EMULTIHOP = 72; - 
SYS_EDOTDOT = 73; - SYS_EBADMSG = 74; - SYS_EOVERFLOW = 75; - SYS_ENOTUNIQ = 76; - SYS_EBADFD = 77; - SYS_EREMCHG = 78; - SYS_ELIBACC = 79; - SYS_ELIBBAD = 80; - SYS_ELIBSCN = 81; - SYS_ELIBMAX = 82; - SYS_ELIBEXEC = 83; - SYS_EILSEQ = 84; - SYS_ERESTART = 85; - SYS_ESTRPIPE = 86; - SYS_EUSERS = 87; - SYS_ENOTSOCK = 88; - SYS_EDESTADDRREQ = 89; - SYS_EMSGSIZE = 90; - SYS_EPROTOTYPE = 91; - SYS_ENOPROTOOPT = 92; - SYS_EPROTONOSUPPORT = 93; - SYS_ESOCKTNOSUPPORT = 94; - SYS_EOPNOTSUPP = 95; - SYS_ENOTSUP = 95; - SYS_EPFNOSUPPORT = 96; - SYS_EAFNOSUPPORT = 97; - SYS_EADDRINUSE = 98; - SYS_EADDRNOTAVAIL = 99; - SYS_ENETDOWN = 100; - SYS_ENETUNREACH = 101; - SYS_ENETRESET = 102; - SYS_ECONNABORTED = 103; - SYS_ECONNRESET = 104; - SYS_ENOBUFS = 105; - SYS_EISCONN = 106; - SYS_ENOTCONN = 107; - SYS_ESHUTDOWN = 108; - SYS_ETOOMANYREFS = 109; - SYS_ETIMEDOUT = 110; - SYS_ECONNREFUSED = 111; - SYS_EHOSTDOWN = 112; - SYS_EHOSTUNREACH = 113; - SYS_EALREADY = 114; - SYS_EINPROGRESS = 115; - SYS_ESTALE = 116; - SYS_EUCLEAN = 117; - SYS_ENOTNAM = 118; - SYS_ENAVAIL = 119; - SYS_EISNAM = 120; - SYS_EREMOTEIO = 121; - SYS_EDQUOT = 122; - SYS_ENOMEDIUM = 123; - SYS_EMEDIUMTYPE = 124; - SYS_ECANCELED = 125; - SYS_ENOKEY = 126; - SYS_EKEYEXPIRED = 127; - SYS_EKEYREVOKED = 128; - SYS_EKEYREJECTED = 129; - SYS_EOWNERDEAD = 130; - SYS_ENOTRECOVERABLE = 131; - SYS_ERFKILL = 132; - } - - optional int32 system_error = 1 [default=0]; - optional string error_detail = 2; -} - -message AddressPort { - required int32 port = 1; - optional bytes packed_address = 2; - - optional string hostname_hint = 3; -} - - - -message CreateSocketRequest { - enum SocketFamily { - IPv4 = 1; - IPv6 = 2; - } - - enum SocketProtocol { - TCP = 1; - UDP = 2; - } - - required SocketFamily family = 1; - required SocketProtocol protocol = 2; - - repeated SocketOption socket_options = 3; - - optional AddressPort proxy_external_ip = 4; - - optional int32 listen_backlog = 5 [default=0]; - - optional AddressPort remote_ip = 6; - - optional string app_id = 9; - - optional int64 project_id = 10; -} - -message CreateSocketReply { - optional string socket_descriptor = 1; - - optional AddressPort server_address = 3; - - optional AddressPort proxy_external_ip = 4; - - extensions 1000 to max; -} - - - -message BindRequest { - required string socket_descriptor = 1; - required AddressPort proxy_external_ip = 2; -} - -message BindReply { - optional AddressPort proxy_external_ip = 1; -} - - - -message GetSocketNameRequest { - required string socket_descriptor = 1; -} - -message GetSocketNameReply { - optional AddressPort proxy_external_ip = 2; -} - - - -message GetPeerNameRequest { - required string socket_descriptor = 1; -} - -message GetPeerNameReply { - optional AddressPort peer_ip = 2; -} - - -message SocketOption { - - enum SocketOptionLevel { - SOCKET_SOL_IP = 0; - SOCKET_SOL_SOCKET = 1; - SOCKET_SOL_TCP = 6; - SOCKET_SOL_UDP = 17; - } - - enum SocketOptionName { - option allow_alias = true; - - SOCKET_SO_DEBUG = 1; - SOCKET_SO_REUSEADDR = 2; - SOCKET_SO_TYPE = 3; - SOCKET_SO_ERROR = 4; - SOCKET_SO_DONTROUTE = 5; - SOCKET_SO_BROADCAST = 6; - SOCKET_SO_SNDBUF = 7; - SOCKET_SO_RCVBUF = 8; - SOCKET_SO_KEEPALIVE = 9; - SOCKET_SO_OOBINLINE = 10; - SOCKET_SO_LINGER = 13; - SOCKET_SO_RCVTIMEO = 20; - SOCKET_SO_SNDTIMEO = 21; - - SOCKET_IP_TOS = 1; - SOCKET_IP_TTL = 2; - SOCKET_IP_HDRINCL = 3; - SOCKET_IP_OPTIONS = 4; - - SOCKET_TCP_NODELAY = 1; - SOCKET_TCP_MAXSEG = 2; - SOCKET_TCP_CORK = 3; - SOCKET_TCP_KEEPIDLE = 4; - SOCKET_TCP_KEEPINTVL = 5; - 
SOCKET_TCP_KEEPCNT = 6; - SOCKET_TCP_SYNCNT = 7; - SOCKET_TCP_LINGER2 = 8; - SOCKET_TCP_DEFER_ACCEPT = 9; - SOCKET_TCP_WINDOW_CLAMP = 10; - SOCKET_TCP_INFO = 11; - SOCKET_TCP_QUICKACK = 12; - } - - required SocketOptionLevel level = 1; - required SocketOptionName option = 2; - required bytes value = 3; -} - - -message SetSocketOptionsRequest { - required string socket_descriptor = 1; - repeated SocketOption options = 2; -} - -message SetSocketOptionsReply { -} - -message GetSocketOptionsRequest { - required string socket_descriptor = 1; - repeated SocketOption options = 2; -} - -message GetSocketOptionsReply { - repeated SocketOption options = 2; -} - - -message ConnectRequest { - required string socket_descriptor = 1; - required AddressPort remote_ip = 2; - optional double timeout_seconds = 3 [default=-1]; -} - -message ConnectReply { - optional AddressPort proxy_external_ip = 1; - - extensions 1000 to max; -} - - -message ListenRequest { - required string socket_descriptor = 1; - required int32 backlog = 2; -} - -message ListenReply { -} - - -message AcceptRequest { - required string socket_descriptor = 1; - optional double timeout_seconds = 2 [default=-1]; -} - -message AcceptReply { - optional bytes new_socket_descriptor = 2; - optional AddressPort remote_address = 3; -} - - - -message ShutDownRequest { - enum How { - SOCKET_SHUT_RD = 1; - SOCKET_SHUT_WR = 2; - SOCKET_SHUT_RDWR = 3; - } - required string socket_descriptor = 1; - required How how = 2; - required int64 send_offset = 3; -} - -message ShutDownReply { -} - - - -message CloseRequest { - required string socket_descriptor = 1; - optional int64 send_offset = 2 [default=-1]; -} - -message CloseReply { -} - - - -message SendRequest { - required string socket_descriptor = 1; - required bytes data = 2 [ctype=CORD]; - required int64 stream_offset = 3; - optional int32 flags = 4 [default=0]; - optional AddressPort send_to = 5; - optional double timeout_seconds = 6 [default=-1]; -} - -message SendReply { - optional int32 data_sent = 1; -} - - -message ReceiveRequest { - enum Flags { - MSG_OOB = 1; - MSG_PEEK = 2; - } - required string socket_descriptor = 1; - required int32 data_size = 2; - optional int32 flags = 3 [default=0]; - optional double timeout_seconds = 5 [default=-1]; -} - -message ReceiveReply { - optional int64 stream_offset = 2; - optional bytes data = 3 [ctype=CORD]; - optional AddressPort received_from = 4; - optional int32 buffer_size = 5; -} - - - -message PollEvent { - - enum PollEventFlag { - SOCKET_POLLNONE = 0; - SOCKET_POLLIN = 1; - SOCKET_POLLPRI = 2; - SOCKET_POLLOUT = 4; - SOCKET_POLLERR = 8; - SOCKET_POLLHUP = 16; - SOCKET_POLLNVAL = 32; - SOCKET_POLLRDNORM = 64; - SOCKET_POLLRDBAND = 128; - SOCKET_POLLWRNORM = 256; - SOCKET_POLLWRBAND = 512; - SOCKET_POLLMSG = 1024; - SOCKET_POLLREMOVE = 4096; - SOCKET_POLLRDHUP = 8192; - }; - - required string socket_descriptor = 1; - required int32 requested_events = 2; - required int32 observed_events = 3; -} - -message PollRequest { - repeated PollEvent events = 1; - optional double timeout_seconds = 2 [default=-1]; -} - -message PollReply { - repeated PollEvent events = 2; -} - -message ResolveRequest { - required string name = 1; - repeated CreateSocketRequest.SocketFamily address_families = 2; -} - -message ResolveReply { - enum ErrorCode { - SOCKET_EAI_ADDRFAMILY = 1; - SOCKET_EAI_AGAIN = 2; - SOCKET_EAI_BADFLAGS = 3; - SOCKET_EAI_FAIL = 4; - SOCKET_EAI_FAMILY = 5; - SOCKET_EAI_MEMORY = 6; - SOCKET_EAI_NODATA = 7; - SOCKET_EAI_NONAME = 8; - SOCKET_EAI_SERVICE = 9; - 
SOCKET_EAI_SOCKTYPE = 10; - SOCKET_EAI_SYSTEM = 11; - SOCKET_EAI_BADHINTS = 12; - SOCKET_EAI_PROTOCOL = 13; - SOCKET_EAI_OVERFLOW = 14; - SOCKET_EAI_MAX = 15; - }; - - repeated bytes packed_address = 2; - optional string canonical_name = 3; - repeated string aliases = 4; -} diff --git a/vendor/google.golang.org/appengine/internal/transaction.go b/vendor/google.golang.org/appengine/internal/transaction.go index 9006ae65380a7..2ae8ab9fa4212 100644 --- a/vendor/google.golang.org/appengine/internal/transaction.go +++ b/vendor/google.golang.org/appengine/internal/transaction.go @@ -7,11 +7,11 @@ package internal // This file implements hooks for applying datastore transactions. import ( + "context" "errors" "reflect" "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" basepb "google.golang.org/appengine/internal/base" pb "google.golang.org/appengine/internal/datastore" @@ -38,13 +38,13 @@ func applyTransaction(pb proto.Message, t *pb.Transaction) { var transactionKey = "used for *Transaction" -func transactionFromContext(ctx netcontext.Context) *transaction { +func transactionFromContext(ctx context.Context) *transaction { t, _ := ctx.Value(&transactionKey).(*transaction) return t } -func withTransaction(ctx netcontext.Context, t *transaction) netcontext.Context { - return netcontext.WithValue(ctx, &transactionKey, t) +func withTransaction(ctx context.Context, t *transaction) context.Context { + return context.WithValue(ctx, &transactionKey, t) } type transaction struct { @@ -54,7 +54,7 @@ type transaction struct { var ErrConcurrentTransaction = errors.New("internal: concurrent transaction") -func RunTransactionOnce(c netcontext.Context, f func(netcontext.Context) error, xg bool, readOnly bool, previousTransaction *pb.Transaction) (*pb.Transaction, error) { +func RunTransactionOnce(c context.Context, f func(context.Context) error, xg bool, readOnly bool, previousTransaction *pb.Transaction) (*pb.Transaction, error) { if transactionFromContext(c) != nil { return nil, errors.New("nested transactions are not supported") } diff --git a/vendor/google.golang.org/appengine/namespace.go b/vendor/google.golang.org/appengine/namespace.go index 21860ca08227c..6f169be487d62 100644 --- a/vendor/google.golang.org/appengine/namespace.go +++ b/vendor/google.golang.org/appengine/namespace.go @@ -5,11 +5,10 @@ package appengine import ( + "context" "fmt" "regexp" - "golang.org/x/net/context" - "google.golang.org/appengine/internal" ) diff --git a/vendor/google.golang.org/appengine/socket/doc.go b/vendor/google.golang.org/appengine/socket/doc.go deleted file mode 100644 index 3de46df826b29..0000000000000 --- a/vendor/google.golang.org/appengine/socket/doc.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2012 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// Package socket provides outbound network sockets. -// -// This package is only required in the classic App Engine environment. -// Applications running only in App Engine "flexible environment" should -// use the standard library's net package. -package socket diff --git a/vendor/google.golang.org/appengine/socket/socket_classic.go b/vendor/google.golang.org/appengine/socket/socket_classic.go deleted file mode 100644 index 0ad50e2d36d7d..0000000000000 --- a/vendor/google.golang.org/appengine/socket/socket_classic.go +++ /dev/null @@ -1,290 +0,0 @@ -// Copyright 2012 Google Inc. All rights reserved. 
-// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// +build appengine - -package socket - -import ( - "fmt" - "io" - "net" - "strconv" - "time" - - "github.com/golang/protobuf/proto" - "golang.org/x/net/context" - "google.golang.org/appengine/internal" - - pb "google.golang.org/appengine/internal/socket" -) - -// Dial connects to the address addr on the network protocol. -// The address format is host:port, where host may be a hostname or an IP address. -// Known protocols are "tcp" and "udp". -// The returned connection satisfies net.Conn, and is valid while ctx is valid; -// if the connection is to be used after ctx becomes invalid, invoke SetContext -// with the new context. -func Dial(ctx context.Context, protocol, addr string) (*Conn, error) { - return DialTimeout(ctx, protocol, addr, 0) -} - -var ipFamilies = []pb.CreateSocketRequest_SocketFamily{ - pb.CreateSocketRequest_IPv4, - pb.CreateSocketRequest_IPv6, -} - -// DialTimeout is like Dial but takes a timeout. -// The timeout includes name resolution, if required. -func DialTimeout(ctx context.Context, protocol, addr string, timeout time.Duration) (*Conn, error) { - dialCtx := ctx // Used for dialing and name resolution, but not stored in the *Conn. - if timeout > 0 { - var cancel context.CancelFunc - dialCtx, cancel = context.WithTimeout(ctx, timeout) - defer cancel() - } - - host, portStr, err := net.SplitHostPort(addr) - if err != nil { - return nil, err - } - port, err := strconv.Atoi(portStr) - if err != nil { - return nil, fmt.Errorf("socket: bad port %q: %v", portStr, err) - } - - var prot pb.CreateSocketRequest_SocketProtocol - switch protocol { - case "tcp": - prot = pb.CreateSocketRequest_TCP - case "udp": - prot = pb.CreateSocketRequest_UDP - default: - return nil, fmt.Errorf("socket: unknown protocol %q", protocol) - } - - packedAddrs, resolved, err := resolve(dialCtx, ipFamilies, host) - if err != nil { - return nil, fmt.Errorf("socket: failed resolving %q: %v", host, err) - } - if len(packedAddrs) == 0 { - return nil, fmt.Errorf("no addresses for %q", host) - } - - packedAddr := packedAddrs[0] // use first address - fam := pb.CreateSocketRequest_IPv4 - if len(packedAddr) == net.IPv6len { - fam = pb.CreateSocketRequest_IPv6 - } - - req := &pb.CreateSocketRequest{ - Family: fam.Enum(), - Protocol: prot.Enum(), - RemoteIp: &pb.AddressPort{ - Port: proto.Int32(int32(port)), - PackedAddress: packedAddr, - }, - } - if resolved { - req.RemoteIp.HostnameHint = &host - } - res := &pb.CreateSocketReply{} - if err := internal.Call(dialCtx, "remote_socket", "CreateSocket", req, res); err != nil { - return nil, err - } - - return &Conn{ - ctx: ctx, - desc: res.GetSocketDescriptor(), - prot: prot, - local: res.ProxyExternalIp, - remote: req.RemoteIp, - }, nil -} - -// LookupIP returns the given host's IP addresses. -func LookupIP(ctx context.Context, host string) (addrs []net.IP, err error) { - packedAddrs, _, err := resolve(ctx, ipFamilies, host) - if err != nil { - return nil, fmt.Errorf("socket: failed resolving %q: %v", host, err) - } - addrs = make([]net.IP, len(packedAddrs)) - for i, pa := range packedAddrs { - addrs[i] = net.IP(pa) - } - return addrs, nil -} - -func resolve(ctx context.Context, fams []pb.CreateSocketRequest_SocketFamily, host string) ([][]byte, bool, error) { - // Check if it's an IP address. 
- if ip := net.ParseIP(host); ip != nil { - if ip := ip.To4(); ip != nil { - return [][]byte{ip}, false, nil - } - return [][]byte{ip}, false, nil - } - - req := &pb.ResolveRequest{ - Name: &host, - AddressFamilies: fams, - } - res := &pb.ResolveReply{} - if err := internal.Call(ctx, "remote_socket", "Resolve", req, res); err != nil { - // XXX: need to map to pb.ResolveReply_ErrorCode? - return nil, false, err - } - return res.PackedAddress, true, nil -} - -// withDeadline is like context.WithDeadline, except it ignores the zero deadline. -func withDeadline(parent context.Context, deadline time.Time) (context.Context, context.CancelFunc) { - if deadline.IsZero() { - return parent, func() {} - } - return context.WithDeadline(parent, deadline) -} - -// Conn represents a socket connection. -// It implements net.Conn. -type Conn struct { - ctx context.Context - desc string - offset int64 - - prot pb.CreateSocketRequest_SocketProtocol - local, remote *pb.AddressPort - - readDeadline, writeDeadline time.Time // optional -} - -// SetContext sets the context that is used by this Conn. -// It is usually used only when using a Conn that was created in a different context, -// such as when a connection is created during a warmup request but used while -// servicing a user request. -func (cn *Conn) SetContext(ctx context.Context) { - cn.ctx = ctx -} - -func (cn *Conn) Read(b []byte) (n int, err error) { - const maxRead = 1 << 20 - if len(b) > maxRead { - b = b[:maxRead] - } - - req := &pb.ReceiveRequest{ - SocketDescriptor: &cn.desc, - DataSize: proto.Int32(int32(len(b))), - } - res := &pb.ReceiveReply{} - if !cn.readDeadline.IsZero() { - req.TimeoutSeconds = proto.Float64(cn.readDeadline.Sub(time.Now()).Seconds()) - } - ctx, cancel := withDeadline(cn.ctx, cn.readDeadline) - defer cancel() - if err := internal.Call(ctx, "remote_socket", "Receive", req, res); err != nil { - return 0, err - } - if len(res.Data) == 0 { - return 0, io.EOF - } - if len(res.Data) > len(b) { - return 0, fmt.Errorf("socket: internal error: read too much data: %d > %d", len(res.Data), len(b)) - } - return copy(b, res.Data), nil -} - -func (cn *Conn) Write(b []byte) (n int, err error) { - const lim = 1 << 20 // max per chunk - - for n < len(b) { - chunk := b[n:] - if len(chunk) > lim { - chunk = chunk[:lim] - } - - req := &pb.SendRequest{ - SocketDescriptor: &cn.desc, - Data: chunk, - StreamOffset: &cn.offset, - } - res := &pb.SendReply{} - if !cn.writeDeadline.IsZero() { - req.TimeoutSeconds = proto.Float64(cn.writeDeadline.Sub(time.Now()).Seconds()) - } - ctx, cancel := withDeadline(cn.ctx, cn.writeDeadline) - defer cancel() - if err = internal.Call(ctx, "remote_socket", "Send", req, res); err != nil { - // assume zero bytes were sent in this RPC - break - } - n += int(res.GetDataSent()) - cn.offset += int64(res.GetDataSent()) - } - - return -} - -func (cn *Conn) Close() error { - req := &pb.CloseRequest{ - SocketDescriptor: &cn.desc, - } - res := &pb.CloseReply{} - if err := internal.Call(cn.ctx, "remote_socket", "Close", req, res); err != nil { - return err - } - cn.desc = "CLOSED" - return nil -} - -func addr(prot pb.CreateSocketRequest_SocketProtocol, ap *pb.AddressPort) net.Addr { - if ap == nil { - return nil - } - switch prot { - case pb.CreateSocketRequest_TCP: - return &net.TCPAddr{ - IP: net.IP(ap.PackedAddress), - Port: int(*ap.Port), - } - case pb.CreateSocketRequest_UDP: - return &net.UDPAddr{ - IP: net.IP(ap.PackedAddress), - Port: int(*ap.Port), - } - } - panic("unknown protocol " + prot.String()) -} - -func 
(cn *Conn) LocalAddr() net.Addr { return addr(cn.prot, cn.local) } -func (cn *Conn) RemoteAddr() net.Addr { return addr(cn.prot, cn.remote) } - -func (cn *Conn) SetDeadline(t time.Time) error { - cn.readDeadline = t - cn.writeDeadline = t - return nil -} - -func (cn *Conn) SetReadDeadline(t time.Time) error { - cn.readDeadline = t - return nil -} - -func (cn *Conn) SetWriteDeadline(t time.Time) error { - cn.writeDeadline = t - return nil -} - -// KeepAlive signals that the connection is still in use. -// It may be called to prevent the socket being closed due to inactivity. -func (cn *Conn) KeepAlive() error { - req := &pb.GetSocketNameRequest{ - SocketDescriptor: &cn.desc, - } - res := &pb.GetSocketNameReply{} - return internal.Call(cn.ctx, "remote_socket", "GetSocketName", req, res) -} - -func init() { - internal.RegisterErrorCodeMap("remote_socket", pb.RemoteSocketServiceError_ErrorCode_name) -} diff --git a/vendor/google.golang.org/appengine/socket/socket_vm.go b/vendor/google.golang.org/appengine/socket/socket_vm.go deleted file mode 100644 index c804169a1c0b0..0000000000000 --- a/vendor/google.golang.org/appengine/socket/socket_vm.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// +build !appengine - -package socket - -import ( - "net" - "time" - - "golang.org/x/net/context" -) - -// Dial connects to the address addr on the network protocol. -// The address format is host:port, where host may be a hostname or an IP address. -// Known protocols are "tcp" and "udp". -// The returned connection satisfies net.Conn, and is valid while ctx is valid; -// if the connection is to be used after ctx becomes invalid, invoke SetContext -// with the new context. -func Dial(ctx context.Context, protocol, addr string) (*Conn, error) { - conn, err := net.Dial(protocol, addr) - if err != nil { - return nil, err - } - return &Conn{conn}, nil -} - -// DialTimeout is like Dial but takes a timeout. -// The timeout includes name resolution, if required. -func DialTimeout(ctx context.Context, protocol, addr string, timeout time.Duration) (*Conn, error) { - conn, err := net.DialTimeout(protocol, addr, timeout) - if err != nil { - return nil, err - } - return &Conn{conn}, nil -} - -// LookupIP returns the given host's IP addresses. -func LookupIP(ctx context.Context, host string) (addrs []net.IP, err error) { - return net.LookupIP(host) -} - -// Conn represents a socket connection. -// It implements net.Conn. -type Conn struct { - net.Conn -} - -// SetContext sets the context that is used by this Conn. -// It is usually used only when using a Conn that was created in a different context, -// such as when a connection is created during a warmup request but used while -// servicing a user request. -func (cn *Conn) SetContext(ctx context.Context) { - // This function is not required in App Engine "flexible environment". -} - -// KeepAlive signals that the connection is still in use. -// It may be called to prevent the socket being closed due to inactivity. -func (cn *Conn) KeepAlive() error { - // This function is not required in App Engine "flexible environment". 
- return nil -} diff --git a/vendor/google.golang.org/appengine/timeout.go b/vendor/google.golang.org/appengine/timeout.go index 05642a992a39e..fcf3ad0a58f11 100644 --- a/vendor/google.golang.org/appengine/timeout.go +++ b/vendor/google.golang.org/appengine/timeout.go @@ -4,7 +4,7 @@ package appengine -import "golang.org/x/net/context" +import "context" // IsTimeoutError reports whether err is a timeout error. func IsTimeoutError(err error) bool { diff --git a/vendor/google.golang.org/appengine/travis_install.sh b/vendor/google.golang.org/appengine/travis_install.sh deleted file mode 100644 index 785b62f46e8ec..0000000000000 --- a/vendor/google.golang.org/appengine/travis_install.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash -set -e - -if [[ $GO111MODULE == "on" ]]; then - go get . -else - go get -u -v $(go list -f '{{join .Imports "\n"}}{{"\n"}}{{join .TestImports "\n"}}' ./... | sort | uniq | grep -v appengine) -fi - -if [[ $GOAPP == "true" ]]; then - mkdir /tmp/sdk - curl -o /tmp/sdk.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.68.zip" - unzip -q /tmp/sdk.zip -d /tmp/sdk - # NOTE: Set the following env vars in the test script: - # export PATH="$PATH:/tmp/sdk/go_appengine" - # export APPENGINE_DEV_APPSERVER=/tmp/sdk/go_appengine/dev_appserver.py -fi - diff --git a/vendor/google.golang.org/appengine/travis_test.sh b/vendor/google.golang.org/appengine/travis_test.sh deleted file mode 100644 index d4390f045b6a0..0000000000000 --- a/vendor/google.golang.org/appengine/travis_test.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash -set -e - -go version -go test -v google.golang.org/appengine/... -go test -v -race google.golang.org/appengine/... -if [[ $GOAPP == "true" ]]; then - export PATH="$PATH:/tmp/sdk/go_appengine" - export APPENGINE_DEV_APPSERVER=/tmp/sdk/go_appengine/dev_appserver.py - goapp version - goapp test -v google.golang.org/appengine/... -fi diff --git a/vendor/google.golang.org/appengine/urlfetch/urlfetch.go b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go index 6ffe1e6d901af..6c0d72418d890 100644 --- a/vendor/google.golang.org/appengine/urlfetch/urlfetch.go +++ b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go @@ -7,6 +7,7 @@ package urlfetch // import "google.golang.org/appengine/urlfetch" import ( + "context" "errors" "fmt" "io" @@ -18,7 +19,6 @@ import ( "time" "github.com/golang/protobuf/proto" - "golang.org/x/net/context" "google.golang.org/appengine/internal" pb "google.golang.org/appengine/internal/urlfetch" @@ -44,11 +44,10 @@ type Transport struct { var _ http.RoundTripper = (*Transport)(nil) // Client returns an *http.Client using a default urlfetch Transport. This -// client will have the default deadline of 5 seconds, and will check the -// validity of SSL certificates. +// client will check the validity of SSL certificates. // -// Any deadline of the provided context will be used for requests through this client; -// if the client does not have a deadline then a 5 second default is used. +// Any deadline of the provided context will be used for requests through this client. +// If the client does not have a deadline, then an App Engine default of 60 second is used. 
func Client(ctx context.Context) *http.Client { return &http.Client{ Transport: &Transport{ diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go new file mode 100644 index 0000000000000..d02e6bbc890ac --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go @@ -0,0 +1,295 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v3.21.12 +// source: google/api/field_info.proto + +package annotations + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The standard format of a field value. The supported formats are all backed +// by either an RFC defined by the IETF or a Google-defined AIP. +type FieldInfo_Format int32 + +const ( + // Default, unspecified value. + FieldInfo_FORMAT_UNSPECIFIED FieldInfo_Format = 0 + // Universally Unique Identifier, version 4, value as defined by + // https://datatracker.ietf.org/doc/html/rfc4122. The value may be + // normalized to entirely lowercase letters. For example, the value + // `F47AC10B-58CC-0372-8567-0E02B2C3D479` would be normalized to + // `f47ac10b-58cc-0372-8567-0e02b2c3d479`. + FieldInfo_UUID4 FieldInfo_Format = 1 + // Internet Protocol v4 value as defined by [RFC + // 791](https://datatracker.ietf.org/doc/html/rfc791). The value may be + // condensed, with leading zeros in each octet stripped. For example, + // `001.022.233.040` would be condensed to `1.22.233.40`. + FieldInfo_IPV4 FieldInfo_Format = 2 + // Internet Protocol v6 value as defined by [RFC + // 2460](https://datatracker.ietf.org/doc/html/rfc2460). The value may be + // normalized to entirely lowercase letters, and zero-padded partial and + // empty octets. For example, the value `2001:DB8::` would be normalized to + // `2001:0db8:0:0`. + FieldInfo_IPV6 FieldInfo_Format = 3 + // An IP address in either v4 or v6 format as described by the individual + // values defined herein. See the comments on the IPV4 and IPV6 types for + // allowed normalizations of each. + FieldInfo_IPV4_OR_IPV6 FieldInfo_Format = 4 +) + +// Enum value maps for FieldInfo_Format. 
+var ( + FieldInfo_Format_name = map[int32]string{ + 0: "FORMAT_UNSPECIFIED", + 1: "UUID4", + 2: "IPV4", + 3: "IPV6", + 4: "IPV4_OR_IPV6", + } + FieldInfo_Format_value = map[string]int32{ + "FORMAT_UNSPECIFIED": 0, + "UUID4": 1, + "IPV4": 2, + "IPV6": 3, + "IPV4_OR_IPV6": 4, + } +) + +func (x FieldInfo_Format) Enum() *FieldInfo_Format { + p := new(FieldInfo_Format) + *p = x + return p +} + +func (x FieldInfo_Format) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FieldInfo_Format) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_field_info_proto_enumTypes[0].Descriptor() +} + +func (FieldInfo_Format) Type() protoreflect.EnumType { + return &file_google_api_field_info_proto_enumTypes[0] +} + +func (x FieldInfo_Format) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use FieldInfo_Format.Descriptor instead. +func (FieldInfo_Format) EnumDescriptor() ([]byte, []int) { + return file_google_api_field_info_proto_rawDescGZIP(), []int{0, 0} +} + +// Rich semantic information of an API field beyond basic typing. +type FieldInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The standard format of a field value. This does not explicitly configure + // any API consumer, just documents the API's format for the field it is + // applied to. + Format FieldInfo_Format `protobuf:"varint,1,opt,name=format,proto3,enum=google.api.FieldInfo_Format" json:"format,omitempty"` +} + +func (x *FieldInfo) Reset() { + *x = FieldInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_field_info_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FieldInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldInfo) ProtoMessage() {} + +func (x *FieldInfo) ProtoReflect() protoreflect.Message { + mi := &file_google_api_field_info_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FieldInfo.ProtoReflect.Descriptor instead. +func (*FieldInfo) Descriptor() ([]byte, []int) { + return file_google_api_field_info_proto_rawDescGZIP(), []int{0} +} + +func (x *FieldInfo) GetFormat() FieldInfo_Format { + if x != nil { + return x.Format + } + return FieldInfo_FORMAT_UNSPECIFIED +} + +var file_google_api_field_info_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*FieldInfo)(nil), + Field: 291403980, + Name: "google.api.field_info", + Tag: "bytes,291403980,opt,name=field_info", + Filename: "google/api/field_info.proto", + }, +} + +// Extension fields to descriptorpb.FieldOptions. +var ( + // Rich semantic descriptor of an API field beyond the basic typing. 
+ // + // Examples: + // + // string request_id = 1 [(google.api.field_info).format = UUID4]; + // string old_ip_address = 2 [(google.api.field_info).format = IPV4]; + // string new_ip_address = 3 [(google.api.field_info).format = IPV6]; + // string actual_ip_address = 4 [ + // (google.api.field_info).format = IPV4_OR_IPV6 + // ]; + // + // optional google.api.FieldInfo field_info = 291403980; + E_FieldInfo = &file_google_api_field_info_proto_extTypes[0] +) + +var File_google_api_field_info_proto protoreflect.FileDescriptor + +var file_google_api_field_info_proto_rawDesc = []byte{ + 0x0a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x94, 0x01, 0x0a, 0x09, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x34, 0x0a, 0x06, 0x66, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, + 0x2e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, + 0x51, 0x0a, 0x06, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x16, 0x0a, 0x12, 0x46, 0x4f, 0x52, + 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, + 0x00, 0x12, 0x09, 0x0a, 0x05, 0x55, 0x55, 0x49, 0x44, 0x34, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, + 0x49, 0x50, 0x56, 0x34, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x50, 0x56, 0x36, 0x10, 0x03, + 0x12, 0x10, 0x0a, 0x0c, 0x49, 0x50, 0x56, 0x34, 0x5f, 0x4f, 0x52, 0x5f, 0x49, 0x50, 0x56, 0x36, + 0x10, 0x04, 0x3a, 0x57, 0x0a, 0x0a, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x69, 0x6e, 0x66, 0x6f, + 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0xcc, 0xf1, 0xf9, 0x8a, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, + 0x52, 0x09, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x6c, 0x0a, 0x0e, 0x63, + 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, + 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_google_api_field_info_proto_rawDescOnce sync.Once + file_google_api_field_info_proto_rawDescData = file_google_api_field_info_proto_rawDesc +) + +func file_google_api_field_info_proto_rawDescGZIP() []byte { + file_google_api_field_info_proto_rawDescOnce.Do(func() { + 
file_google_api_field_info_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_field_info_proto_rawDescData) + }) + return file_google_api_field_info_proto_rawDescData +} + +var file_google_api_field_info_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_api_field_info_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_api_field_info_proto_goTypes = []interface{}{ + (FieldInfo_Format)(0), // 0: google.api.FieldInfo.Format + (*FieldInfo)(nil), // 1: google.api.FieldInfo + (*descriptorpb.FieldOptions)(nil), // 2: google.protobuf.FieldOptions +} +var file_google_api_field_info_proto_depIdxs = []int32{ + 0, // 0: google.api.FieldInfo.format:type_name -> google.api.FieldInfo.Format + 2, // 1: google.api.field_info:extendee -> google.protobuf.FieldOptions + 1, // 2: google.api.field_info:type_name -> google.api.FieldInfo + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 2, // [2:3] is the sub-list for extension type_name + 1, // [1:2] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_google_api_field_info_proto_init() } +func file_google_api_field_info_proto_init() { + if File_google_api_field_info_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_api_field_info_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FieldInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_field_info_proto_rawDesc, + NumEnums: 1, + NumMessages: 1, + NumExtensions: 1, + NumServices: 0, + }, + GoTypes: file_google_api_field_info_proto_goTypes, + DependencyIndexes: file_google_api_field_info_proto_depIdxs, + EnumInfos: file_google_api_field_info_proto_enumTypes, + MessageInfos: file_google_api_field_info_proto_msgTypes, + ExtensionInfos: file_google_api_field_info_proto_extTypes, + }.Build() + File_google_api_field_info_proto = out.File + file_google_api_field_info_proto_rawDesc = nil + file_google_api_field_info_proto_goTypes = nil + file_google_api_field_info_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_instance_admin.pb.go b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_instance_admin.pb.go index 15ad78adccdd3..9941e81aa37c5 100644 --- a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_instance_admin.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_instance_admin.pb.go @@ -1,4 +1,4 @@ -// Copyright 2022 Google LLC +// Copyright 2023 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.21.12 // source: google/bigtable/admin/v2/bigtable_instance_admin.proto package admin @@ -25,9 +25,9 @@ import ( reflect "reflect" sync "sync" + iampb "cloud.google.com/go/iam/apiv1/iampb" + longrunningpb "cloud.google.com/go/longrunning/autogen/longrunningpb" _ "google.golang.org/genproto/googleapis/api/annotations" - v1 "google.golang.org/genproto/googleapis/iam/v1" - longrunning "google.golang.org/genproto/googleapis/longrunning" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" @@ -112,11 +112,11 @@ type CreateInstanceRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. The unique name of the project in which to create the new instance. - // Values are of the form `projects/{project}`. + // Required. The unique name of the project in which to create the new + // instance. Values are of the form `projects/{project}`. Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` - // Required. The ID to be used when referring to the new instance within its project, - // e.g., just `myinstance` rather than + // Required. The ID to be used when referring to the new instance within its + // project, e.g., just `myinstance` rather than // `projects/myproject/instances/myinstance`. InstanceId string `protobuf:"bytes,2,opt,name=instance_id,json=instanceId,proto3" json:"instance_id,omitempty"` // Required. The instance to create. @@ -246,8 +246,8 @@ type ListInstancesRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. The unique name of the project for which a list of instances is requested. - // Values are of the form `projects/{project}`. + // Required. The unique name of the project for which a list of instances is + // requested. Values are of the form `projects/{project}`. Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` // DEPRECATED: This field is unused and ignored. PageToken string `protobuf:"bytes,2,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` @@ -486,12 +486,11 @@ type CreateClusterRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. The unique name of the instance in which to create the new cluster. - // Values are of the form - // `projects/{project}/instances/{instance}`. + // Required. The unique name of the instance in which to create the new + // cluster. Values are of the form `projects/{project}/instances/{instance}`. Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` - // Required. The ID to be used when referring to the new cluster within its instance, - // e.g., just `mycluster` rather than + // Required. The ID to be used when referring to the new cluster within its + // instance, e.g., just `mycluster` rather than // `projects/myproject/instances/myinstance/clusters/mycluster`. ClusterId string `protobuf:"bytes,2,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` // Required. The cluster to be created. @@ -608,10 +607,11 @@ type ListClustersRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. The unique name of the instance for which a list of clusters is requested. - // Values are of the form `projects/{project}/instances/{instance}`. 
- // Use `{instance} = '-'` to list Clusters for all Instances in a project, - // e.g., `projects/myproject/instances/-`. + // Required. The unique name of the instance for which a list of clusters is + // requested. Values are of the form + // `projects/{project}/instances/{instance}`. Use `{instance} = '-'` to list + // Clusters for all Instances in a project, e.g., + // `projects/myproject/instances/-`. Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` // DEPRECATED: This field is unused and ignored. PageToken string `protobuf:"bytes,2,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` @@ -740,8 +740,8 @@ type DeleteClusterRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. The unique name of the cluster to be deleted. Values are of the form - // `projects/{project}/instances/{instance}/clusters/{cluster}`. + // Required. The unique name of the cluster to be deleted. Values are of the + // form `projects/{project}/instances/{instance}/clusters/{cluster}`. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` } @@ -1141,8 +1141,8 @@ type PartialUpdateClusterRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. The Cluster which contains the partial updates to be applied, subject to - // the update_mask. + // Required. The Cluster which contains the partial updates to be applied, + // subject to the update_mask. Cluster *Cluster `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"` // Required. The subset of Cluster fields which should be replaced. UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` @@ -1200,12 +1200,11 @@ type CreateAppProfileRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. The unique name of the instance in which to create the new app profile. - // Values are of the form - // `projects/{project}/instances/{instance}`. + // Required. The unique name of the instance in which to create the new app + // profile. Values are of the form `projects/{project}/instances/{instance}`. Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` - // Required. The ID to be used when referring to the new app profile within its - // instance, e.g., just `myprofile` rather than + // Required. The ID to be used when referring to the new app profile within + // its instance, e.g., just `myprofile` rather than // `projects/myproject/instances/myinstance/appProfiles/myprofile`. AppProfileId string `protobuf:"bytes,2,opt,name=app_profile_id,json=appProfileId,proto3" json:"app_profile_id,omitempty"` // Required. The app profile to be created. @@ -1281,8 +1280,8 @@ type GetAppProfileRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. The unique name of the requested app profile. Values are of the form - // `projects/{project}/instances/{instance}/appProfiles/{app_profile}`. + // Required. The unique name of the requested app profile. Values are of the + // form `projects/{project}/instances/{instance}/appProfiles/{app_profile}`. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` } @@ -1331,8 +1330,8 @@ type ListAppProfilesRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. 
The unique name of the instance for which a list of app profiles is - // requested. Values are of the form + // Required. The unique name of the instance for which a list of app profiles + // is requested. Values are of the form // `projects/{project}/instances/{instance}`. // Use `{instance} = '-'` to list AppProfiles for all Instances in a project, // e.g., `projects/myproject/instances/-`. @@ -1550,7 +1549,8 @@ type DeleteAppProfileRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. The unique name of the app profile to be deleted. Values are of the form + // Required. The unique name of the app profile to be deleted. Values are of + // the form // `projects/{project}/instances/{instance}/appProfiles/{app_profile}`. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Required. If true, ignore safety checks when deleting the app profile. @@ -2553,20 +2553,20 @@ var file_google_bigtable_admin_v2_bigtable_instance_admin_proto_goTypes = []inte (*ListHotTabletsResponse)(nil), // 26: google.bigtable.admin.v2.ListHotTabletsResponse nil, // 27: google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry (*CreateClusterMetadata_TableProgress)(nil), // 28: google.bigtable.admin.v2.CreateClusterMetadata.TableProgress - nil, // 29: google.bigtable.admin.v2.CreateClusterMetadata.TablesEntry - (*Instance)(nil), // 30: google.bigtable.admin.v2.Instance - (*fieldmaskpb.FieldMask)(nil), // 31: google.protobuf.FieldMask - (*Cluster)(nil), // 32: google.bigtable.admin.v2.Cluster - (*timestamppb.Timestamp)(nil), // 33: google.protobuf.Timestamp - (*AppProfile)(nil), // 34: google.bigtable.admin.v2.AppProfile - (*HotTablet)(nil), // 35: google.bigtable.admin.v2.HotTablet - (*v1.GetIamPolicyRequest)(nil), // 36: google.iam.v1.GetIamPolicyRequest - (*v1.SetIamPolicyRequest)(nil), // 37: google.iam.v1.SetIamPolicyRequest - (*v1.TestIamPermissionsRequest)(nil), // 38: google.iam.v1.TestIamPermissionsRequest - (*longrunning.Operation)(nil), // 39: google.longrunning.Operation - (*emptypb.Empty)(nil), // 40: google.protobuf.Empty - (*v1.Policy)(nil), // 41: google.iam.v1.Policy - (*v1.TestIamPermissionsResponse)(nil), // 42: google.iam.v1.TestIamPermissionsResponse + nil, // 29: google.bigtable.admin.v2.CreateClusterMetadata.TablesEntry + (*Instance)(nil), // 30: google.bigtable.admin.v2.Instance + (*fieldmaskpb.FieldMask)(nil), // 31: google.protobuf.FieldMask + (*Cluster)(nil), // 32: google.bigtable.admin.v2.Cluster + (*timestamppb.Timestamp)(nil), // 33: google.protobuf.Timestamp + (*AppProfile)(nil), // 34: google.bigtable.admin.v2.AppProfile + (*HotTablet)(nil), // 35: google.bigtable.admin.v2.HotTablet + (*iampb.GetIamPolicyRequest)(nil), // 36: google.iam.v1.GetIamPolicyRequest + (*iampb.SetIamPolicyRequest)(nil), // 37: google.iam.v1.SetIamPolicyRequest + (*iampb.TestIamPermissionsRequest)(nil), // 38: google.iam.v1.TestIamPermissionsRequest + (*longrunningpb.Operation)(nil), // 39: google.longrunning.Operation + (*emptypb.Empty)(nil), // 40: google.protobuf.Empty + (*iampb.Policy)(nil), // 41: google.iam.v1.Policy + (*iampb.TestIamPermissionsResponse)(nil), // 42: google.iam.v1.TestIamPermissionsResponse } var file_google_bigtable_admin_v2_bigtable_instance_admin_proto_depIdxs = []int32{ 30, // 0: google.bigtable.admin.v2.CreateInstanceRequest.instance:type_name -> google.bigtable.admin.v2.Instance @@ -3025,7 +3025,7 @@ type BigtableInstanceAdminClient interface { // serve_nodes is set to non-zero, then the cluster is manually 
scaled. If // cluster_config.cluster_autoscaling_config is non-empty, then autoscaling is // enabled. - CreateInstance(ctx context.Context, in *CreateInstanceRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) + CreateInstance(ctx context.Context, in *CreateInstanceRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error) // Gets information about an instance. GetInstance(ctx context.Context, in *GetInstanceRequest, opts ...grpc.CallOption) (*Instance, error) // Lists information about instances in a project. @@ -3036,7 +3036,7 @@ type BigtableInstanceAdminClient interface { UpdateInstance(ctx context.Context, in *Instance, opts ...grpc.CallOption) (*Instance, error) // Partially updates an instance within a project. This method can modify all // fields of an Instance and is the preferred way to update an Instance. - PartialUpdateInstance(ctx context.Context, in *PartialUpdateInstanceRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) + PartialUpdateInstance(ctx context.Context, in *PartialUpdateInstanceRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error) // Delete an instance from a project. DeleteInstance(ctx context.Context, in *DeleteInstanceRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) // Creates a cluster within an instance. @@ -3046,7 +3046,7 @@ type BigtableInstanceAdminClient interface { // serve_nodes is set to non-zero, then the cluster is manually scaled. If // cluster_config.cluster_autoscaling_config is non-empty, then autoscaling is // enabled. - CreateCluster(ctx context.Context, in *CreateClusterRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) + CreateCluster(ctx context.Context, in *CreateClusterRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error) // Gets information about a cluster. GetCluster(ctx context.Context, in *GetClusterRequest, opts ...grpc.CallOption) (*Cluster, error) // Lists information about clusters in an instance. @@ -3056,7 +3056,7 @@ type BigtableInstanceAdminClient interface { // Note that UpdateCluster does not support updating // cluster_config.cluster_autoscaling_config. In order to update it, you // must use PartialUpdateCluster. - UpdateCluster(ctx context.Context, in *Cluster, opts ...grpc.CallOption) (*longrunning.Operation, error) + UpdateCluster(ctx context.Context, in *Cluster, opts ...grpc.CallOption) (*longrunningpb.Operation, error) // Partially updates a cluster within a project. This method is the preferred // way to update a Cluster. // @@ -3069,7 +3069,7 @@ type BigtableInstanceAdminClient interface { // // To disable autoscaling, clear cluster_config.cluster_autoscaling_config, // and explicitly set a serve_node count via the update_mask. - PartialUpdateCluster(ctx context.Context, in *PartialUpdateClusterRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) + PartialUpdateCluster(ctx context.Context, in *PartialUpdateClusterRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error) // Deletes a cluster from an instance. DeleteCluster(ctx context.Context, in *DeleteClusterRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) // Creates an app profile within an instance. @@ -3079,17 +3079,17 @@ type BigtableInstanceAdminClient interface { // Lists information about app profiles in an instance. ListAppProfiles(ctx context.Context, in *ListAppProfilesRequest, opts ...grpc.CallOption) (*ListAppProfilesResponse, error) // Updates an app profile within an instance. 
- UpdateAppProfile(ctx context.Context, in *UpdateAppProfileRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) + UpdateAppProfile(ctx context.Context, in *UpdateAppProfileRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error) // Deletes an app profile from an instance. DeleteAppProfile(ctx context.Context, in *DeleteAppProfileRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) // Gets the access control policy for an instance resource. Returns an empty // policy if an instance exists but does not have a policy set. - GetIamPolicy(ctx context.Context, in *v1.GetIamPolicyRequest, opts ...grpc.CallOption) (*v1.Policy, error) + GetIamPolicy(ctx context.Context, in *iampb.GetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error) // Sets the access control policy on an instance resource. Replaces any // existing policy. - SetIamPolicy(ctx context.Context, in *v1.SetIamPolicyRequest, opts ...grpc.CallOption) (*v1.Policy, error) + SetIamPolicy(ctx context.Context, in *iampb.SetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error) // Returns permissions that the caller has on the specified instance resource. - TestIamPermissions(ctx context.Context, in *v1.TestIamPermissionsRequest, opts ...grpc.CallOption) (*v1.TestIamPermissionsResponse, error) + TestIamPermissions(ctx context.Context, in *iampb.TestIamPermissionsRequest, opts ...grpc.CallOption) (*iampb.TestIamPermissionsResponse, error) // Lists hot tablets in a cluster, within the time range provided. Hot // tablets are ordered based on CPU usage. ListHotTablets(ctx context.Context, in *ListHotTabletsRequest, opts ...grpc.CallOption) (*ListHotTabletsResponse, error) @@ -3103,8 +3103,8 @@ func NewBigtableInstanceAdminClient(cc grpc.ClientConnInterface) BigtableInstanc return &bigtableInstanceAdminClient{cc} } -func (c *bigtableInstanceAdminClient) CreateInstance(ctx context.Context, in *CreateInstanceRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) { - out := new(longrunning.Operation) +func (c *bigtableInstanceAdminClient) CreateInstance(ctx context.Context, in *CreateInstanceRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error) { + out := new(longrunningpb.Operation) err := c.cc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateInstance", in, out, opts...) if err != nil { return nil, err @@ -3139,8 +3139,8 @@ func (c *bigtableInstanceAdminClient) UpdateInstance(ctx context.Context, in *In return out, nil } -func (c *bigtableInstanceAdminClient) PartialUpdateInstance(ctx context.Context, in *PartialUpdateInstanceRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) { - out := new(longrunning.Operation) +func (c *bigtableInstanceAdminClient) PartialUpdateInstance(ctx context.Context, in *PartialUpdateInstanceRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error) { + out := new(longrunningpb.Operation) err := c.cc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableInstanceAdmin/PartialUpdateInstance", in, out, opts...) 
if err != nil { return nil, err @@ -3157,8 +3157,8 @@ func (c *bigtableInstanceAdminClient) DeleteInstance(ctx context.Context, in *De return out, nil } -func (c *bigtableInstanceAdminClient) CreateCluster(ctx context.Context, in *CreateClusterRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) { - out := new(longrunning.Operation) +func (c *bigtableInstanceAdminClient) CreateCluster(ctx context.Context, in *CreateClusterRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error) { + out := new(longrunningpb.Operation) err := c.cc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateCluster", in, out, opts...) if err != nil { return nil, err @@ -3184,8 +3184,8 @@ func (c *bigtableInstanceAdminClient) ListClusters(ctx context.Context, in *List return out, nil } -func (c *bigtableInstanceAdminClient) UpdateCluster(ctx context.Context, in *Cluster, opts ...grpc.CallOption) (*longrunning.Operation, error) { - out := new(longrunning.Operation) +func (c *bigtableInstanceAdminClient) UpdateCluster(ctx context.Context, in *Cluster, opts ...grpc.CallOption) (*longrunningpb.Operation, error) { + out := new(longrunningpb.Operation) err := c.cc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateCluster", in, out, opts...) if err != nil { return nil, err @@ -3193,8 +3193,8 @@ func (c *bigtableInstanceAdminClient) UpdateCluster(ctx context.Context, in *Clu return out, nil } -func (c *bigtableInstanceAdminClient) PartialUpdateCluster(ctx context.Context, in *PartialUpdateClusterRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) { - out := new(longrunning.Operation) +func (c *bigtableInstanceAdminClient) PartialUpdateCluster(ctx context.Context, in *PartialUpdateClusterRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error) { + out := new(longrunningpb.Operation) err := c.cc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableInstanceAdmin/PartialUpdateCluster", in, out, opts...) if err != nil { return nil, err @@ -3238,8 +3238,8 @@ func (c *bigtableInstanceAdminClient) ListAppProfiles(ctx context.Context, in *L return out, nil } -func (c *bigtableInstanceAdminClient) UpdateAppProfile(ctx context.Context, in *UpdateAppProfileRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) { - out := new(longrunning.Operation) +func (c *bigtableInstanceAdminClient) UpdateAppProfile(ctx context.Context, in *UpdateAppProfileRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error) { + out := new(longrunningpb.Operation) err := c.cc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateAppProfile", in, out, opts...) if err != nil { return nil, err @@ -3256,8 +3256,8 @@ func (c *bigtableInstanceAdminClient) DeleteAppProfile(ctx context.Context, in * return out, nil } -func (c *bigtableInstanceAdminClient) GetIamPolicy(ctx context.Context, in *v1.GetIamPolicyRequest, opts ...grpc.CallOption) (*v1.Policy, error) { - out := new(v1.Policy) +func (c *bigtableInstanceAdminClient) GetIamPolicy(ctx context.Context, in *iampb.GetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error) { + out := new(iampb.Policy) err := c.cc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetIamPolicy", in, out, opts...) 
if err != nil { return nil, err @@ -3265,8 +3265,8 @@ func (c *bigtableInstanceAdminClient) GetIamPolicy(ctx context.Context, in *v1.G return out, nil } -func (c *bigtableInstanceAdminClient) SetIamPolicy(ctx context.Context, in *v1.SetIamPolicyRequest, opts ...grpc.CallOption) (*v1.Policy, error) { - out := new(v1.Policy) +func (c *bigtableInstanceAdminClient) SetIamPolicy(ctx context.Context, in *iampb.SetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error) { + out := new(iampb.Policy) err := c.cc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableInstanceAdmin/SetIamPolicy", in, out, opts...) if err != nil { return nil, err @@ -3274,8 +3274,8 @@ func (c *bigtableInstanceAdminClient) SetIamPolicy(ctx context.Context, in *v1.S return out, nil } -func (c *bigtableInstanceAdminClient) TestIamPermissions(ctx context.Context, in *v1.TestIamPermissionsRequest, opts ...grpc.CallOption) (*v1.TestIamPermissionsResponse, error) { - out := new(v1.TestIamPermissionsResponse) +func (c *bigtableInstanceAdminClient) TestIamPermissions(ctx context.Context, in *iampb.TestIamPermissionsRequest, opts ...grpc.CallOption) (*iampb.TestIamPermissionsResponse, error) { + out := new(iampb.TestIamPermissionsResponse) err := c.cc.Invoke(ctx, "/google.bigtable.admin.v2.BigtableInstanceAdmin/TestIamPermissions", in, out, opts...) if err != nil { return nil, err @@ -3301,7 +3301,7 @@ type BigtableInstanceAdminServer interface { // serve_nodes is set to non-zero, then the cluster is manually scaled. If // cluster_config.cluster_autoscaling_config is non-empty, then autoscaling is // enabled. - CreateInstance(context.Context, *CreateInstanceRequest) (*longrunning.Operation, error) + CreateInstance(context.Context, *CreateInstanceRequest) (*longrunningpb.Operation, error) // Gets information about an instance. GetInstance(context.Context, *GetInstanceRequest) (*Instance, error) // Lists information about instances in a project. @@ -3312,7 +3312,7 @@ type BigtableInstanceAdminServer interface { UpdateInstance(context.Context, *Instance) (*Instance, error) // Partially updates an instance within a project. This method can modify all // fields of an Instance and is the preferred way to update an Instance. - PartialUpdateInstance(context.Context, *PartialUpdateInstanceRequest) (*longrunning.Operation, error) + PartialUpdateInstance(context.Context, *PartialUpdateInstanceRequest) (*longrunningpb.Operation, error) // Delete an instance from a project. DeleteInstance(context.Context, *DeleteInstanceRequest) (*emptypb.Empty, error) // Creates a cluster within an instance. @@ -3322,7 +3322,7 @@ type BigtableInstanceAdminServer interface { // serve_nodes is set to non-zero, then the cluster is manually scaled. If // cluster_config.cluster_autoscaling_config is non-empty, then autoscaling is // enabled. - CreateCluster(context.Context, *CreateClusterRequest) (*longrunning.Operation, error) + CreateCluster(context.Context, *CreateClusterRequest) (*longrunningpb.Operation, error) // Gets information about a cluster. GetCluster(context.Context, *GetClusterRequest) (*Cluster, error) // Lists information about clusters in an instance. @@ -3332,7 +3332,7 @@ type BigtableInstanceAdminServer interface { // Note that UpdateCluster does not support updating // cluster_config.cluster_autoscaling_config. In order to update it, you // must use PartialUpdateCluster. 
- UpdateCluster(context.Context, *Cluster) (*longrunning.Operation, error) + UpdateCluster(context.Context, *Cluster) (*longrunningpb.Operation, error) // Partially updates a cluster within a project. This method is the preferred // way to update a Cluster. // @@ -3345,7 +3345,7 @@ type BigtableInstanceAdminServer interface { // // To disable autoscaling, clear cluster_config.cluster_autoscaling_config, // and explicitly set a serve_node count via the update_mask. - PartialUpdateCluster(context.Context, *PartialUpdateClusterRequest) (*longrunning.Operation, error) + PartialUpdateCluster(context.Context, *PartialUpdateClusterRequest) (*longrunningpb.Operation, error) // Deletes a cluster from an instance. DeleteCluster(context.Context, *DeleteClusterRequest) (*emptypb.Empty, error) // Creates an app profile within an instance. @@ -3355,17 +3355,17 @@ type BigtableInstanceAdminServer interface { // Lists information about app profiles in an instance. ListAppProfiles(context.Context, *ListAppProfilesRequest) (*ListAppProfilesResponse, error) // Updates an app profile within an instance. - UpdateAppProfile(context.Context, *UpdateAppProfileRequest) (*longrunning.Operation, error) + UpdateAppProfile(context.Context, *UpdateAppProfileRequest) (*longrunningpb.Operation, error) // Deletes an app profile from an instance. DeleteAppProfile(context.Context, *DeleteAppProfileRequest) (*emptypb.Empty, error) // Gets the access control policy for an instance resource. Returns an empty // policy if an instance exists but does not have a policy set. - GetIamPolicy(context.Context, *v1.GetIamPolicyRequest) (*v1.Policy, error) + GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest) (*iampb.Policy, error) // Sets the access control policy on an instance resource. Replaces any // existing policy. - SetIamPolicy(context.Context, *v1.SetIamPolicyRequest) (*v1.Policy, error) + SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest) (*iampb.Policy, error) // Returns permissions that the caller has on the specified instance resource. - TestIamPermissions(context.Context, *v1.TestIamPermissionsRequest) (*v1.TestIamPermissionsResponse, error) + TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error) // Lists hot tablets in a cluster, within the time range provided. Hot // tablets are ordered based on CPU usage. 
ListHotTablets(context.Context, *ListHotTabletsRequest) (*ListHotTabletsResponse, error) @@ -3375,7 +3375,7 @@ type BigtableInstanceAdminServer interface { type UnimplementedBigtableInstanceAdminServer struct { } -func (*UnimplementedBigtableInstanceAdminServer) CreateInstance(context.Context, *CreateInstanceRequest) (*longrunning.Operation, error) { +func (*UnimplementedBigtableInstanceAdminServer) CreateInstance(context.Context, *CreateInstanceRequest) (*longrunningpb.Operation, error) { return nil, status.Errorf(codes.Unimplemented, "method CreateInstance not implemented") } func (*UnimplementedBigtableInstanceAdminServer) GetInstance(context.Context, *GetInstanceRequest) (*Instance, error) { @@ -3387,13 +3387,13 @@ func (*UnimplementedBigtableInstanceAdminServer) ListInstances(context.Context, func (*UnimplementedBigtableInstanceAdminServer) UpdateInstance(context.Context, *Instance) (*Instance, error) { return nil, status.Errorf(codes.Unimplemented, "method UpdateInstance not implemented") } -func (*UnimplementedBigtableInstanceAdminServer) PartialUpdateInstance(context.Context, *PartialUpdateInstanceRequest) (*longrunning.Operation, error) { +func (*UnimplementedBigtableInstanceAdminServer) PartialUpdateInstance(context.Context, *PartialUpdateInstanceRequest) (*longrunningpb.Operation, error) { return nil, status.Errorf(codes.Unimplemented, "method PartialUpdateInstance not implemented") } func (*UnimplementedBigtableInstanceAdminServer) DeleteInstance(context.Context, *DeleteInstanceRequest) (*emptypb.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method DeleteInstance not implemented") } -func (*UnimplementedBigtableInstanceAdminServer) CreateCluster(context.Context, *CreateClusterRequest) (*longrunning.Operation, error) { +func (*UnimplementedBigtableInstanceAdminServer) CreateCluster(context.Context, *CreateClusterRequest) (*longrunningpb.Operation, error) { return nil, status.Errorf(codes.Unimplemented, "method CreateCluster not implemented") } func (*UnimplementedBigtableInstanceAdminServer) GetCluster(context.Context, *GetClusterRequest) (*Cluster, error) { @@ -3402,10 +3402,10 @@ func (*UnimplementedBigtableInstanceAdminServer) GetCluster(context.Context, *Ge func (*UnimplementedBigtableInstanceAdminServer) ListClusters(context.Context, *ListClustersRequest) (*ListClustersResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ListClusters not implemented") } -func (*UnimplementedBigtableInstanceAdminServer) UpdateCluster(context.Context, *Cluster) (*longrunning.Operation, error) { +func (*UnimplementedBigtableInstanceAdminServer) UpdateCluster(context.Context, *Cluster) (*longrunningpb.Operation, error) { return nil, status.Errorf(codes.Unimplemented, "method UpdateCluster not implemented") } -func (*UnimplementedBigtableInstanceAdminServer) PartialUpdateCluster(context.Context, *PartialUpdateClusterRequest) (*longrunning.Operation, error) { +func (*UnimplementedBigtableInstanceAdminServer) PartialUpdateCluster(context.Context, *PartialUpdateClusterRequest) (*longrunningpb.Operation, error) { return nil, status.Errorf(codes.Unimplemented, "method PartialUpdateCluster not implemented") } func (*UnimplementedBigtableInstanceAdminServer) DeleteCluster(context.Context, *DeleteClusterRequest) (*emptypb.Empty, error) { @@ -3420,19 +3420,19 @@ func (*UnimplementedBigtableInstanceAdminServer) GetAppProfile(context.Context, func (*UnimplementedBigtableInstanceAdminServer) ListAppProfiles(context.Context, *ListAppProfilesRequest) 
(*ListAppProfilesResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ListAppProfiles not implemented") } -func (*UnimplementedBigtableInstanceAdminServer) UpdateAppProfile(context.Context, *UpdateAppProfileRequest) (*longrunning.Operation, error) { +func (*UnimplementedBigtableInstanceAdminServer) UpdateAppProfile(context.Context, *UpdateAppProfileRequest) (*longrunningpb.Operation, error) { return nil, status.Errorf(codes.Unimplemented, "method UpdateAppProfile not implemented") } func (*UnimplementedBigtableInstanceAdminServer) DeleteAppProfile(context.Context, *DeleteAppProfileRequest) (*emptypb.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method DeleteAppProfile not implemented") } -func (*UnimplementedBigtableInstanceAdminServer) GetIamPolicy(context.Context, *v1.GetIamPolicyRequest) (*v1.Policy, error) { +func (*UnimplementedBigtableInstanceAdminServer) GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest) (*iampb.Policy, error) { return nil, status.Errorf(codes.Unimplemented, "method GetIamPolicy not implemented") } -func (*UnimplementedBigtableInstanceAdminServer) SetIamPolicy(context.Context, *v1.SetIamPolicyRequest) (*v1.Policy, error) { +func (*UnimplementedBigtableInstanceAdminServer) SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest) (*iampb.Policy, error) { return nil, status.Errorf(codes.Unimplemented, "method SetIamPolicy not implemented") } -func (*UnimplementedBigtableInstanceAdminServer) TestIamPermissions(context.Context, *v1.TestIamPermissionsRequest) (*v1.TestIamPermissionsResponse, error) { +func (*UnimplementedBigtableInstanceAdminServer) TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method TestIamPermissions not implemented") } func (*UnimplementedBigtableInstanceAdminServer) ListHotTablets(context.Context, *ListHotTabletsRequest) (*ListHotTabletsResponse, error) { @@ -3750,7 +3750,7 @@ func _BigtableInstanceAdmin_DeleteAppProfile_Handler(srv interface{}, ctx contex } func _BigtableInstanceAdmin_GetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(v1.GetIamPolicyRequest) + in := new(iampb.GetIamPolicyRequest) if err := dec(in); err != nil { return nil, err } @@ -3762,13 +3762,13 @@ func _BigtableInstanceAdmin_GetIamPolicy_Handler(srv interface{}, ctx context.Co FullMethod: "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetIamPolicy", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BigtableInstanceAdminServer).GetIamPolicy(ctx, req.(*v1.GetIamPolicyRequest)) + return srv.(BigtableInstanceAdminServer).GetIamPolicy(ctx, req.(*iampb.GetIamPolicyRequest)) } return interceptor(ctx, in, info, handler) } func _BigtableInstanceAdmin_SetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(v1.SetIamPolicyRequest) + in := new(iampb.SetIamPolicyRequest) if err := dec(in); err != nil { return nil, err } @@ -3780,13 +3780,13 @@ func _BigtableInstanceAdmin_SetIamPolicy_Handler(srv interface{}, ctx context.Co FullMethod: "/google.bigtable.admin.v2.BigtableInstanceAdmin/SetIamPolicy", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BigtableInstanceAdminServer).SetIamPolicy(ctx, req.(*v1.SetIamPolicyRequest)) + 
return srv.(BigtableInstanceAdminServer).SetIamPolicy(ctx, req.(*iampb.SetIamPolicyRequest)) } return interceptor(ctx, in, info, handler) } func _BigtableInstanceAdmin_TestIamPermissions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(v1.TestIamPermissionsRequest) + in := new(iampb.TestIamPermissionsRequest) if err := dec(in); err != nil { return nil, err } @@ -3798,7 +3798,7 @@ func _BigtableInstanceAdmin_TestIamPermissions_Handler(srv interface{}, ctx cont FullMethod: "/google.bigtable.admin.v2.BigtableInstanceAdmin/TestIamPermissions", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BigtableInstanceAdminServer).TestIamPermissions(ctx, req.(*v1.TestIamPermissionsRequest)) + return srv.(BigtableInstanceAdminServer).TestIamPermissions(ctx, req.(*iampb.TestIamPermissionsRequest)) } return interceptor(ctx, in, info, handler) } diff --git a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/common.pb.go b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/common.pb.go index 23212665937df..ba70105eb0456 100644 --- a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/common.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/common.pb.go @@ -1,4 +1,4 @@ -// Copyright 2022 Google LLC +// Copyright 2023 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.21.12 // source: google/bigtable/admin/v2/common.proto package admin diff --git a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/instance.pb.go b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/instance.pb.go index b42ac1d9e98cf..d1e81d3829a1d 100644 --- a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/instance.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/instance.pb.go @@ -1,4 +1,4 @@ -// Copyright 2022 Google LLC +// Copyright 2023 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.21.12 // source: google/bigtable/admin/v2/instance.proto package admin @@ -217,6 +217,62 @@ func (Cluster_State) EnumDescriptor() ([]byte, []int) { return file_google_bigtable_admin_v2_instance_proto_rawDescGZIP(), []int{3, 0} } +// Possible priorities for an app profile. Note that higher priority writes +// can sometimes queue behind lower priority writes to the same tablet, as +// writes must be strictly sequenced in the durability log. +type AppProfile_Priority int32 + +const ( + // Default value. Mapped to PRIORITY_HIGH (the legacy behavior) on creation. + AppProfile_PRIORITY_UNSPECIFIED AppProfile_Priority = 0 + AppProfile_PRIORITY_LOW AppProfile_Priority = 1 + AppProfile_PRIORITY_MEDIUM AppProfile_Priority = 2 + AppProfile_PRIORITY_HIGH AppProfile_Priority = 3 +) + +// Enum value maps for AppProfile_Priority. 
+var ( + AppProfile_Priority_name = map[int32]string{ + 0: "PRIORITY_UNSPECIFIED", + 1: "PRIORITY_LOW", + 2: "PRIORITY_MEDIUM", + 3: "PRIORITY_HIGH", + } + AppProfile_Priority_value = map[string]int32{ + "PRIORITY_UNSPECIFIED": 0, + "PRIORITY_LOW": 1, + "PRIORITY_MEDIUM": 2, + "PRIORITY_HIGH": 3, + } +) + +func (x AppProfile_Priority) Enum() *AppProfile_Priority { + p := new(AppProfile_Priority) + *p = x + return p +} + +func (x AppProfile_Priority) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (AppProfile_Priority) Descriptor() protoreflect.EnumDescriptor { + return file_google_bigtable_admin_v2_instance_proto_enumTypes[3].Descriptor() +} + +func (AppProfile_Priority) Type() protoreflect.EnumType { + return &file_google_bigtable_admin_v2_instance_proto_enumTypes[3] +} + +func (x AppProfile_Priority) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use AppProfile_Priority.Descriptor instead. +func (AppProfile_Priority) EnumDescriptor() ([]byte, []int) { + return file_google_bigtable_admin_v2_instance_proto_rawDescGZIP(), []int{4, 0} +} + // A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and // the resources that serve them. // All tables in an instance are served from all @@ -243,16 +299,16 @@ type Instance struct { // deployment strategies. They can be used to filter resources and aggregate // metrics. // - // * Label keys must be between 1 and 63 characters long and must conform to - // the regular expression: `[\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}`. - // * Label values must be between 0 and 63 characters long and must conform to - // the regular expression: `[\p{Ll}\p{Lo}\p{N}_-]{0,63}`. - // * No more than 64 labels can be associated with a given resource. - // * Keys and values must both be under 128 bytes. + // - Label keys must be between 1 and 63 characters long and must conform to + // the regular expression: `[\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}`. + // - Label values must be between 0 and 63 characters long and must conform to + // the regular expression: `[\p{Ll}\p{Lo}\p{N}_-]{0,63}`. + // - No more than 64 labels can be associated with a given resource. + // - Keys and values must both be under 128 bytes. Labels map[string]string `protobuf:"bytes,5,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // Output only. A server-assigned timestamp representing when this Instance was created. - // For instances created before this field was added (August 2021), this value - // is `seconds: 0, nanos: 1`. + // Output only. A server-assigned timestamp representing when this Instance + // was created. For instances created before this field was added (August + // 2021), this value is `seconds: 0, nanos: 1`. CreateTime *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` // Output only. Reserved for future use. SatisfiesPzs *bool `protobuf:"varint,8,opt,name=satisfies_pzs,json=satisfiesPzs,proto3,oneof" json:"satisfies_pzs,omitempty"` @@ -352,7 +408,7 @@ type AutoscalingTargets struct { CpuUtilizationPercent int32 `protobuf:"varint,2,opt,name=cpu_utilization_percent,json=cpuUtilizationPercent,proto3" json:"cpu_utilization_percent,omitempty"` // The storage utilization that the Autoscaler should be trying to achieve. 
// This number is limited between 2560 (2.5TiB) and 5120 (5TiB) for a SSD - // cluster and between 8192 (8TiB) and 16384 (16TiB) for an HDD cluster; + // cluster and between 8192 (8TiB) and 16384 (16TiB) for an HDD cluster, // otherwise it will return INVALID_ARGUMENT error. If this value is set to 0, // it will be treated as if it were set to the default value: 2560 for SSD, // 8192 for HDD. @@ -474,8 +530,8 @@ type Cluster struct { // The unique name of the cluster. Values are of the form // `projects/{project}/instances/{instance}/clusters/[a-z][-a-z0-9]*`. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Immutable. The location where this cluster's nodes and storage reside. For best - // performance, clients should be located as close as possible to this + // Immutable. The location where this cluster's nodes and storage reside. For + // best performance, clients should be located as close as possible to this // cluster. Currently only zones are supported, so values should be of the // form `projects/{project}/locations/{zone}`. Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"` @@ -485,6 +541,7 @@ type Cluster struct { // throughput and more consistent performance. ServeNodes int32 `protobuf:"varint,4,opt,name=serve_nodes,json=serveNodes,proto3" json:"serve_nodes,omitempty"` // Types that are assignable to Config: + // // *Cluster_ClusterConfig_ Config isCluster_Config `protobuf_oneof:"config"` // Immutable. The type of storage used by this cluster to serve its @@ -618,9 +675,17 @@ type AppProfile struct { // A value must be explicitly set. // // Types that are assignable to RoutingPolicy: + // // *AppProfile_MultiClusterRoutingUseAny_ // *AppProfile_SingleClusterRouting_ RoutingPolicy isAppProfile_RoutingPolicy `protobuf_oneof:"routing_policy"` + // Options for isolating this app profile's traffic from other use cases. + // + // Types that are assignable to Isolation: + // + // *AppProfile_Priority_ + // *AppProfile_StandardIsolation_ + Isolation isAppProfile_Isolation `protobuf_oneof:"isolation"` } func (x *AppProfile) Reset() { @@ -697,6 +762,28 @@ func (x *AppProfile) GetSingleClusterRouting() *AppProfile_SingleClusterRouting return nil } +func (m *AppProfile) GetIsolation() isAppProfile_Isolation { + if m != nil { + return m.Isolation + } + return nil +} + +// Deprecated: Do not use. +func (x *AppProfile) GetPriority() AppProfile_Priority { + if x, ok := x.GetIsolation().(*AppProfile_Priority_); ok { + return x.Priority + } + return AppProfile_PRIORITY_UNSPECIFIED +} + +func (x *AppProfile) GetStandardIsolation() *AppProfile_StandardIsolation { + if x, ok := x.GetIsolation().(*AppProfile_StandardIsolation_); ok { + return x.StandardIsolation + } + return nil +} + type isAppProfile_RoutingPolicy interface { isAppProfile_RoutingPolicy() } @@ -715,6 +802,30 @@ func (*AppProfile_MultiClusterRoutingUseAny_) isAppProfile_RoutingPolicy() {} func (*AppProfile_SingleClusterRouting_) isAppProfile_RoutingPolicy() {} +type isAppProfile_Isolation interface { + isAppProfile_Isolation() +} + +type AppProfile_Priority_ struct { + // This field has been deprecated in favor of `standard_isolation.priority`. + // If you set this field, `standard_isolation.priority` will be set instead. + // + // The priority of requests sent using this app profile. + // + // Deprecated: Do not use. 
+ Priority AppProfile_Priority `protobuf:"varint,7,opt,name=priority,proto3,enum=google.bigtable.admin.v2.AppProfile_Priority,oneof"` +} + +type AppProfile_StandardIsolation_ struct { + // The standard options used for isolating this app profile's traffic from + // other use cases. + StandardIsolation *AppProfile_StandardIsolation `protobuf:"bytes,11,opt,name=standard_isolation,json=standardIsolation,proto3,oneof"` +} + +func (*AppProfile_Priority_) isAppProfile_Isolation() {} + +func (*AppProfile_StandardIsolation_) isAppProfile_Isolation() {} + // A tablet is a defined by a start and end key and is explained in // https://cloud.google.com/bigtable/docs/overview#architecture and // https://cloud.google.com/bigtable/docs/performance#optimization. @@ -739,10 +850,10 @@ type HotTablet struct { StartKey string `protobuf:"bytes,5,opt,name=start_key,json=startKey,proto3" json:"start_key,omitempty"` // Tablet End Key (inclusive). EndKey string `protobuf:"bytes,6,opt,name=end_key,json=endKey,proto3" json:"end_key,omitempty"` - // Output only. The average CPU usage spent by a node on this tablet over the start_time to - // end_time time range. The percentage is the amount of CPU used by the node - // to serve the tablet, from 0% (tablet was not interacted with) to 100% (the - // node spent all cycles serving the hot tablet). + // Output only. The average CPU usage spent by a node on this tablet over the + // start_time to end_time time range. The percentage is the amount of CPU used + // by the node to serve the tablet, from 0% (tablet was not interacted with) + // to 100% (the node spent all cycles serving the hot tablet). NodeCpuUsagePercent float32 `protobuf:"fixed32,7,opt,name=node_cpu_usage_percent,json=nodeCpuUsagePercent,proto3" json:"node_cpu_usage_percent,omitempty"` } @@ -943,12 +1054,13 @@ type Cluster_EncryptionConfig struct { // Describes the Cloud KMS encryption key that will be used to protect the // destination Bigtable cluster. The requirements for this key are: - // 1) The Cloud Bigtable service account associated with the project that - // contains this cluster must be granted the - // `cloudkms.cryptoKeyEncrypterDecrypter` role on the CMEK key. - // 2) Only regional keys can be used and the region of the CMEK key must - // match the region of the cluster. - // 3) All clusters within an instance must use the same CMEK key. + // 1. The Cloud Bigtable service account associated with the project that + // contains this cluster must be granted the + // `cloudkms.cryptoKeyEncrypterDecrypter` role on the CMEK key. + // 2. Only regional keys can be used and the region of the CMEK key must + // match the region of the cluster. + // 3. All clusters within an instance must use the same CMEK key. + // // Values are of the form // `projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{key}` KmsKeyName string `protobuf:"bytes,1,opt,name=kms_key_name,json=kmsKeyName,proto3" json:"kms_key_name,omitempty"` @@ -1109,6 +1221,56 @@ func (x *AppProfile_SingleClusterRouting) GetAllowTransactionalWrites() bool { return false } +// Standard options for isolating this app profile's traffic from other use +// cases. +type AppProfile_StandardIsolation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The priority of requests sent using this app profile. 
+ Priority AppProfile_Priority `protobuf:"varint,1,opt,name=priority,proto3,enum=google.bigtable.admin.v2.AppProfile_Priority" json:"priority,omitempty"` +} + +func (x *AppProfile_StandardIsolation) Reset() { + *x = AppProfile_StandardIsolation{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AppProfile_StandardIsolation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AppProfile_StandardIsolation) ProtoMessage() {} + +func (x *AppProfile_StandardIsolation) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_admin_v2_instance_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AppProfile_StandardIsolation.ProtoReflect.Descriptor instead. +func (*AppProfile_StandardIsolation) Descriptor() ([]byte, []int) { + return file_google_bigtable_admin_v2_instance_proto_rawDescGZIP(), []int{4, 2} +} + +func (x *AppProfile_StandardIsolation) GetPriority() AppProfile_Priority { + if x != nil { + return x.Priority + } + return AppProfile_PRIORITY_UNSPECIFIED +} + var File_google_bigtable_admin_v2_instance_proto protoreflect.FileDescriptor var file_google_bigtable_admin_v2_instance_proto_rawDesc = []byte{ @@ -1253,8 +1415,8 @@ var file_google_bigtable_admin_v2_instance_proto_rawDesc = []byte{ 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x7d, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x7d, 0x42, 0x08, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x84, - 0x05, 0x0a, 0x0a, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x12, 0x0a, + 0x74, 0x65, 0x72, 0x7d, 0x42, 0x08, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x8b, + 0x08, 0x0a, 0x0a, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, @@ -1274,78 +1436,102 @@ var file_google_bigtable_admin_v2_instance_proto_rawDesc = []byte{ 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x2e, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x48, 0x00, 0x52, 0x14, 0x73, 0x69, 0x6e, 0x67, 0x6c, - 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x1a, - 0x3c, 0x0a, 0x19, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, - 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x55, 0x73, 0x65, 0x41, 0x6e, 0x79, 0x12, 0x1f, 0x0a, 0x0b, - 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x1a, 0x73, 0x0a, - 0x14, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x6f, - 0x75, 0x74, 0x69, 0x6e, 0x67, 
0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x49, 0x64, 0x12, 0x3c, 0x0a, 0x1a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x74, 0x72, - 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x77, 0x72, 0x69, 0x74, - 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x54, - 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x57, 0x72, 0x69, 0x74, - 0x65, 0x73, 0x3a, 0x6f, 0xea, 0x41, 0x6c, 0x0a, 0x27, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x12, + 0x4f, 0x0a, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x70, 0x70, + 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, + 0x42, 0x02, 0x18, 0x01, 0x48, 0x01, 0x52, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, + 0x12, 0x67, 0x0a, 0x12, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x73, 0x6f, + 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, + 0x6c, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x49, 0x73, 0x6f, 0x6c, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x01, 0x52, 0x11, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, + 0x49, 0x73, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x3c, 0x0a, 0x19, 0x4d, 0x75, 0x6c, + 0x74, 0x69, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, + 0x55, 0x73, 0x65, 0x41, 0x6e, 0x79, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x1a, 0x73, 0x0a, 0x14, 0x53, 0x69, 0x6e, 0x67, 0x6c, + 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x12, + 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x3c, + 0x0a, 0x1a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x18, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x57, 0x72, 0x69, 0x74, 0x65, 0x73, 0x1a, 0x5e, 0x0a, 0x11, + 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x49, 0x73, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x49, 0x0a, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x41, + 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, + 0x74, 0x79, 0x52, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 
0x69, 0x74, 0x79, 0x22, 0x5e, 0x0a, 0x08, + 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x18, 0x0a, 0x14, 0x50, 0x52, 0x49, 0x4f, + 0x52, 0x49, 0x54, 0x59, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, + 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x50, 0x52, 0x49, 0x4f, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4c, + 0x4f, 0x57, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x50, 0x52, 0x49, 0x4f, 0x52, 0x49, 0x54, 0x59, + 0x5f, 0x4d, 0x45, 0x44, 0x49, 0x55, 0x4d, 0x10, 0x02, 0x12, 0x11, 0x0a, 0x0d, 0x50, 0x52, 0x49, + 0x4f, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x48, 0x49, 0x47, 0x48, 0x10, 0x03, 0x3a, 0x6f, 0xea, 0x41, + 0x6c, 0x0a, 0x27, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x41, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, + 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, + 0x63, 0x65, 0x7d, 0x2f, 0x61, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x2f, + 0x7b, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x7d, 0x42, 0x10, 0x0a, + 0x0e, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, + 0x0b, 0x0a, 0x09, 0x69, 0x73, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xd4, 0x03, 0x0a, + 0x09, 0x48, 0x6f, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x46, + 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x27, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, - 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, - 0x12, 0x41, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, - 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x7d, 0x2f, 0x61, 0x70, 0x70, 0x50, 0x72, 0x6f, - 0x66, 0x69, 0x6c, 0x65, 0x73, 0x2f, 0x7b, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, - 0x6c, 0x65, 0x7d, 0x42, 0x10, 0x0a, 0x0e, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x70, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0xd4, 0x03, 0x0a, 0x09, 0x48, 0x6f, 0x74, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x46, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27, 0xfa, 0x41, 0x24, - 0x0a, 0x22, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, - 0x61, 0x62, 0x6c, 0x65, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, - 0x3e, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 
0x6d, 0x70, 0x42, - 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, - 0x3a, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, - 0x41, 0x03, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x73, - 0x74, 0x61, 0x72, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x73, 0x74, 0x61, 0x72, 0x74, 0x4b, 0x65, 0x79, 0x12, 0x17, 0x0a, 0x07, 0x65, 0x6e, 0x64, 0x5f, - 0x6b, 0x65, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x64, 0x4b, 0x65, - 0x79, 0x12, 0x38, 0x0a, 0x16, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x63, 0x70, 0x75, 0x5f, 0x75, 0x73, - 0x61, 0x67, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x02, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x13, 0x6e, 0x6f, 0x64, 0x65, 0x43, 0x70, 0x75, 0x55, - 0x73, 0x61, 0x67, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x3a, 0x7f, 0xea, 0x41, 0x7c, - 0x0a, 0x26, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x48, - 0x6f, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x52, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73, - 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, - 0x7d, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x7d, 0x2f, 0x68, 0x6f, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x2f, - 0x7b, 0x68, 0x6f, 0x74, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x7d, 0x42, 0xd0, 0x02, 0x0a, - 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x42, 0x0d, 0x49, - 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3d, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, - 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2f, 0x61, - 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x32, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0xaa, 0x02, 0x1e, - 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x42, 0x69, 0x67, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x32, 0xca, 0x02, - 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x42, 0x69, - 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5c, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x5c, 0x56, 0x32, 0xea, - 0x02, 0x22, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, - 0x3a, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x3a, 0x3a, 0x41, 0x64, 0x6d, 0x69, 0x6e, - 0x3a, 0x3a, 0x56, 0x32, 0xea, 0x41, 0x78, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, - 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, - 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x12, 0x53, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 
0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, - 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, - 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, - 0x79, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x7d, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x09, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x73, 0x74, 0x61, + 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, + 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x74, 0x61, 0x72, 0x74, 0x4b, 0x65, 0x79, 0x12, + 0x17, 0x0a, 0x07, 0x65, 0x6e, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x65, 0x6e, 0x64, 0x4b, 0x65, 0x79, 0x12, 0x38, 0x0a, 0x16, 0x6e, 0x6f, 0x64, 0x65, + 0x5f, 0x63, 0x70, 0x75, 0x5f, 0x75, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, + 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x02, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x13, 0x6e, + 0x6f, 0x64, 0x65, 0x43, 0x70, 0x75, 0x55, 0x73, 0x61, 0x67, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65, + 0x6e, 0x74, 0x3a, 0x7f, 0xea, 0x41, 0x7c, 0x0a, 0x26, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x48, 0x6f, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, + 0x52, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, + 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x7d, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x73, 0x2f, 0x7b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x7d, 0x2f, 0x68, 0x6f, 0x74, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x2f, 0x7b, 0x68, 0x6f, 0x74, 0x5f, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x7d, 0x42, 0xd0, 0x02, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, + 0x6e, 0x2e, 0x76, 0x32, 0x42, 0x0d, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, + 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x62, 0x69, 0x67, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x32, 
0x3b, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, + 0x6f, 0x75, 0x64, 0x2e, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x41, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, + 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5c, 0x41, 0x64, + 0x6d, 0x69, 0x6e, 0x5c, 0x56, 0x32, 0xea, 0x02, 0x22, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, + 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x3a, 0x3a, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x3a, 0x3a, 0x56, 0x32, 0xea, 0x41, 0x78, 0x0a, 0x21, + 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, + 0x79, 0x12, 0x53, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, + 0x6e, 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x7d, 0x2f, 0x63, + 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, + 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x7d, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1360,48 +1546,53 @@ func file_google_bigtable_admin_v2_instance_proto_rawDescGZIP() []byte { return file_google_bigtable_admin_v2_instance_proto_rawDescData } -var file_google_bigtable_admin_v2_instance_proto_enumTypes = make([]protoimpl.EnumInfo, 3) -var file_google_bigtable_admin_v2_instance_proto_msgTypes = make([]protoimpl.MessageInfo, 12) +var file_google_bigtable_admin_v2_instance_proto_enumTypes = make([]protoimpl.EnumInfo, 4) +var file_google_bigtable_admin_v2_instance_proto_msgTypes = make([]protoimpl.MessageInfo, 13) var file_google_bigtable_admin_v2_instance_proto_goTypes = []interface{}{ (Instance_State)(0), // 0: google.bigtable.admin.v2.Instance.State (Instance_Type)(0), // 1: google.bigtable.admin.v2.Instance.Type (Cluster_State)(0), // 2: google.bigtable.admin.v2.Cluster.State - (*Instance)(nil), // 3: google.bigtable.admin.v2.Instance - (*AutoscalingTargets)(nil), // 4: google.bigtable.admin.v2.AutoscalingTargets - (*AutoscalingLimits)(nil), // 5: google.bigtable.admin.v2.AutoscalingLimits - (*Cluster)(nil), // 6: google.bigtable.admin.v2.Cluster - (*AppProfile)(nil), // 7: google.bigtable.admin.v2.AppProfile - (*HotTablet)(nil), // 8: google.bigtable.admin.v2.HotTablet - nil, // 9: google.bigtable.admin.v2.Instance.LabelsEntry - (*Cluster_ClusterAutoscalingConfig)(nil), // 10: google.bigtable.admin.v2.Cluster.ClusterAutoscalingConfig - (*Cluster_ClusterConfig)(nil), // 11: google.bigtable.admin.v2.Cluster.ClusterConfig - (*Cluster_EncryptionConfig)(nil), // 12: google.bigtable.admin.v2.Cluster.EncryptionConfig - (*AppProfile_MultiClusterRoutingUseAny)(nil), // 13: google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAny - (*AppProfile_SingleClusterRouting)(nil), // 14: google.bigtable.admin.v2.AppProfile.SingleClusterRouting - (*timestamppb.Timestamp)(nil), // 15: google.protobuf.Timestamp - (StorageType)(0), // 16: google.bigtable.admin.v2.StorageType + (AppProfile_Priority)(0), // 3: google.bigtable.admin.v2.AppProfile.Priority + (*Instance)(nil), // 4: google.bigtable.admin.v2.Instance + 
(*AutoscalingTargets)(nil), // 5: google.bigtable.admin.v2.AutoscalingTargets + (*AutoscalingLimits)(nil), // 6: google.bigtable.admin.v2.AutoscalingLimits + (*Cluster)(nil), // 7: google.bigtable.admin.v2.Cluster + (*AppProfile)(nil), // 8: google.bigtable.admin.v2.AppProfile + (*HotTablet)(nil), // 9: google.bigtable.admin.v2.HotTablet + nil, // 10: google.bigtable.admin.v2.Instance.LabelsEntry + (*Cluster_ClusterAutoscalingConfig)(nil), // 11: google.bigtable.admin.v2.Cluster.ClusterAutoscalingConfig + (*Cluster_ClusterConfig)(nil), // 12: google.bigtable.admin.v2.Cluster.ClusterConfig + (*Cluster_EncryptionConfig)(nil), // 13: google.bigtable.admin.v2.Cluster.EncryptionConfig + (*AppProfile_MultiClusterRoutingUseAny)(nil), // 14: google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAny + (*AppProfile_SingleClusterRouting)(nil), // 15: google.bigtable.admin.v2.AppProfile.SingleClusterRouting + (*AppProfile_StandardIsolation)(nil), // 16: google.bigtable.admin.v2.AppProfile.StandardIsolation + (*timestamppb.Timestamp)(nil), // 17: google.protobuf.Timestamp + (StorageType)(0), // 18: google.bigtable.admin.v2.StorageType } var file_google_bigtable_admin_v2_instance_proto_depIdxs = []int32{ 0, // 0: google.bigtable.admin.v2.Instance.state:type_name -> google.bigtable.admin.v2.Instance.State 1, // 1: google.bigtable.admin.v2.Instance.type:type_name -> google.bigtable.admin.v2.Instance.Type - 9, // 2: google.bigtable.admin.v2.Instance.labels:type_name -> google.bigtable.admin.v2.Instance.LabelsEntry - 15, // 3: google.bigtable.admin.v2.Instance.create_time:type_name -> google.protobuf.Timestamp + 10, // 2: google.bigtable.admin.v2.Instance.labels:type_name -> google.bigtable.admin.v2.Instance.LabelsEntry + 17, // 3: google.bigtable.admin.v2.Instance.create_time:type_name -> google.protobuf.Timestamp 2, // 4: google.bigtable.admin.v2.Cluster.state:type_name -> google.bigtable.admin.v2.Cluster.State - 11, // 5: google.bigtable.admin.v2.Cluster.cluster_config:type_name -> google.bigtable.admin.v2.Cluster.ClusterConfig - 16, // 6: google.bigtable.admin.v2.Cluster.default_storage_type:type_name -> google.bigtable.admin.v2.StorageType - 12, // 7: google.bigtable.admin.v2.Cluster.encryption_config:type_name -> google.bigtable.admin.v2.Cluster.EncryptionConfig - 13, // 8: google.bigtable.admin.v2.AppProfile.multi_cluster_routing_use_any:type_name -> google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAny - 14, // 9: google.bigtable.admin.v2.AppProfile.single_cluster_routing:type_name -> google.bigtable.admin.v2.AppProfile.SingleClusterRouting - 15, // 10: google.bigtable.admin.v2.HotTablet.start_time:type_name -> google.protobuf.Timestamp - 15, // 11: google.bigtable.admin.v2.HotTablet.end_time:type_name -> google.protobuf.Timestamp - 5, // 12: google.bigtable.admin.v2.Cluster.ClusterAutoscalingConfig.autoscaling_limits:type_name -> google.bigtable.admin.v2.AutoscalingLimits - 4, // 13: google.bigtable.admin.v2.Cluster.ClusterAutoscalingConfig.autoscaling_targets:type_name -> google.bigtable.admin.v2.AutoscalingTargets - 10, // 14: google.bigtable.admin.v2.Cluster.ClusterConfig.cluster_autoscaling_config:type_name -> google.bigtable.admin.v2.Cluster.ClusterAutoscalingConfig - 15, // [15:15] is the sub-list for method output_type - 15, // [15:15] is the sub-list for method input_type - 15, // [15:15] is the sub-list for extension type_name - 15, // [15:15] is the sub-list for extension extendee - 0, // [0:15] is the sub-list for field type_name + 12, // 5: 
google.bigtable.admin.v2.Cluster.cluster_config:type_name -> google.bigtable.admin.v2.Cluster.ClusterConfig + 18, // 6: google.bigtable.admin.v2.Cluster.default_storage_type:type_name -> google.bigtable.admin.v2.StorageType + 13, // 7: google.bigtable.admin.v2.Cluster.encryption_config:type_name -> google.bigtable.admin.v2.Cluster.EncryptionConfig + 14, // 8: google.bigtable.admin.v2.AppProfile.multi_cluster_routing_use_any:type_name -> google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAny + 15, // 9: google.bigtable.admin.v2.AppProfile.single_cluster_routing:type_name -> google.bigtable.admin.v2.AppProfile.SingleClusterRouting + 3, // 10: google.bigtable.admin.v2.AppProfile.priority:type_name -> google.bigtable.admin.v2.AppProfile.Priority + 16, // 11: google.bigtable.admin.v2.AppProfile.standard_isolation:type_name -> google.bigtable.admin.v2.AppProfile.StandardIsolation + 17, // 12: google.bigtable.admin.v2.HotTablet.start_time:type_name -> google.protobuf.Timestamp + 17, // 13: google.bigtable.admin.v2.HotTablet.end_time:type_name -> google.protobuf.Timestamp + 6, // 14: google.bigtable.admin.v2.Cluster.ClusterAutoscalingConfig.autoscaling_limits:type_name -> google.bigtable.admin.v2.AutoscalingLimits + 5, // 15: google.bigtable.admin.v2.Cluster.ClusterAutoscalingConfig.autoscaling_targets:type_name -> google.bigtable.admin.v2.AutoscalingTargets + 11, // 16: google.bigtable.admin.v2.Cluster.ClusterConfig.cluster_autoscaling_config:type_name -> google.bigtable.admin.v2.Cluster.ClusterAutoscalingConfig + 3, // 17: google.bigtable.admin.v2.AppProfile.StandardIsolation.priority:type_name -> google.bigtable.admin.v2.AppProfile.Priority + 18, // [18:18] is the sub-list for method output_type + 18, // [18:18] is the sub-list for method input_type + 18, // [18:18] is the sub-list for extension type_name + 18, // [18:18] is the sub-list for extension extendee + 0, // [0:18] is the sub-list for field type_name } func init() { file_google_bigtable_admin_v2_instance_proto_init() } @@ -1543,6 +1734,18 @@ func file_google_bigtable_admin_v2_instance_proto_init() { return nil } } + file_google_bigtable_admin_v2_instance_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AppProfile_StandardIsolation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } file_google_bigtable_admin_v2_instance_proto_msgTypes[0].OneofWrappers = []interface{}{} file_google_bigtable_admin_v2_instance_proto_msgTypes[3].OneofWrappers = []interface{}{ @@ -1551,14 +1754,16 @@ func file_google_bigtable_admin_v2_instance_proto_init() { file_google_bigtable_admin_v2_instance_proto_msgTypes[4].OneofWrappers = []interface{}{ (*AppProfile_MultiClusterRoutingUseAny_)(nil), (*AppProfile_SingleClusterRouting_)(nil), + (*AppProfile_Priority_)(nil), + (*AppProfile_StandardIsolation_)(nil), } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_bigtable_admin_v2_instance_proto_rawDesc, - NumEnums: 3, - NumMessages: 12, + NumEnums: 4, + NumMessages: 13, NumExtensions: 0, NumServices: 0, }, diff --git a/vendor/modules.txt b/vendor/modules.txt index 2992d1e44075e..a56350129e720 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,4 +1,4 @@ -# cloud.google.com/go v0.110.7 +# cloud.google.com/go v0.110.10 ## explicit; go 1.19 cloud.google.com/go cloud.google.com/go/internal @@ -13,17 +13,17 @@ 
cloud.google.com/go/bigtable cloud.google.com/go/bigtable/bttest cloud.google.com/go/bigtable/internal cloud.google.com/go/bigtable/internal/option -# cloud.google.com/go/compute v1.23.0 +# cloud.google.com/go/compute v1.23.3 ## explicit; go 1.19 cloud.google.com/go/compute/internal # cloud.google.com/go/compute/metadata v0.2.3 ## explicit; go 1.19 cloud.google.com/go/compute/metadata -# cloud.google.com/go/iam v1.1.1 +# cloud.google.com/go/iam v1.1.5 ## explicit; go 1.19 cloud.google.com/go/iam cloud.google.com/go/iam/apiv1/iampb -# cloud.google.com/go/longrunning v0.5.1 +# cloud.google.com/go/longrunning v0.5.4 ## explicit; go 1.19 cloud.google.com/go/longrunning cloud.google.com/go/longrunning/autogen @@ -36,12 +36,12 @@ cloud.google.com/go/pubsub/apiv1/pubsubpb cloud.google.com/go/pubsub/internal cloud.google.com/go/pubsub/internal/distribution cloud.google.com/go/pubsub/internal/scheduler -# cloud.google.com/go/storage v1.30.1 +# cloud.google.com/go/storage v1.35.1 ## explicit; go 1.19 cloud.google.com/go/storage cloud.google.com/go/storage/internal cloud.google.com/go/storage/internal/apiv2 -cloud.google.com/go/storage/internal/apiv2/stubs +cloud.google.com/go/storage/internal/apiv2/storagepb # github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 ## explicit; go 1.20 # github.com/Azure/azure-pipeline-go v0.2.3 @@ -52,15 +52,19 @@ github.com/Azure/azure-pipeline-go/pipeline github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network github.com/Azure/azure-sdk-for-go/version -# github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0 +# github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.2 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/azcore +github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource +github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy +github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body +github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared @@ -70,10 +74,11 @@ github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming github.com/Azure/azure-sdk-for-go/sdk/azcore/to github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing -# github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 +# github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/azidentity -# github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 +github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal +# github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/internal/diag github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo @@ -82,7 +87,7 @@ github.com/Azure/azure-sdk-for-go/sdk/internal/log github.com/Azure/azure-sdk-for-go/sdk/internal/poller github.com/Azure/azure-sdk-for-go/sdk/internal/temporal github.com/Azure/azure-sdk-for-go/sdk/internal/uuid -# 
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.5.1 +# github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.0 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob @@ -135,7 +140,7 @@ github.com/Azure/go-autorest/logger # github.com/Azure/go-autorest/tracing v0.6.0 ## explicit; go 1.12 github.com/Azure/go-autorest/tracing -# github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 +# github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 ## explicit; go 1.18 github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential @@ -744,6 +749,9 @@ github.com/gogo/status # github.com/golang-jwt/jwt/v4 v4.5.0 ## explicit; go 1.16 github.com/golang-jwt/jwt/v4 +# github.com/golang-jwt/jwt/v5 v5.2.0 +## explicit; go 1.18 +github.com/golang-jwt/jwt/v5 # github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da ## explicit github.com/golang/groupcache/lru @@ -771,7 +779,7 @@ github.com/google/gnostic-models/extensions github.com/google/gnostic-models/jsonschema github.com/google/gnostic-models/openapiv2 github.com/google/gnostic-models/openapiv3 -# github.com/google/go-cmp v0.5.9 +# github.com/google/go-cmp v0.6.0 ## explicit; go 1.13 github.com/google/go-cmp/cmp github.com/google/go-cmp/cmp/internal/diff @@ -791,8 +799,8 @@ github.com/google/pprof/profile # github.com/google/renameio/v2 v2.0.0 ## explicit; go 1.13 github.com/google/renameio/v2 -# github.com/google/s2a-go v0.1.4 -## explicit; go 1.16 +# github.com/google/s2a-go v0.1.7 +## explicit; go 1.19 github.com/google/s2a-go github.com/google/s2a-go/fallback github.com/google/s2a-go/internal/authinfo @@ -812,11 +820,12 @@ github.com/google/s2a-go/internal/v2 github.com/google/s2a-go/internal/v2/certverifier github.com/google/s2a-go/internal/v2/remotesigner github.com/google/s2a-go/internal/v2/tlsconfigstore +github.com/google/s2a-go/retry github.com/google/s2a-go/stream -# github.com/google/uuid v1.3.1 +# github.com/google/uuid v1.6.0 ## explicit github.com/google/uuid -# github.com/googleapis/enterprise-certificate-proxy v0.2.5 +# github.com/googleapis/enterprise-certificate-proxy v0.3.2 ## explicit; go 1.19 github.com/googleapis/enterprise-certificate-proxy/client github.com/googleapis/enterprise-certificate-proxy/client/util @@ -1209,7 +1218,7 @@ github.com/pierrec/lz4/v4/internal/lz4block github.com/pierrec/lz4/v4/internal/lz4errors github.com/pierrec/lz4/v4/internal/lz4stream github.com/pierrec/lz4/v4/internal/xxh32 -# github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 +# github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c ## explicit; go 1.14 github.com/pkg/browser # github.com/pkg/errors v0.9.1 @@ -1222,8 +1231,8 @@ github.com/pmezard/go-difflib/difflib ## explicit; go 1.18 github.com/prometheus/alertmanager/api/v2/models github.com/prometheus/alertmanager/pkg/modtimevfs -# github.com/prometheus/client_golang v1.16.0 -## explicit; go 1.17 +# github.com/prometheus/client_golang v1.17.0 +## explicit; go 1.19 github.com/prometheus/client_golang/api github.com/prometheus/client_golang/api/prometheus/v1 github.com/prometheus/client_golang/prometheus @@ -1234,7 +1243,7 @@ github.com/prometheus/client_golang/prometheus/promhttp github.com/prometheus/client_golang/prometheus/push github.com/prometheus/client_golang/prometheus/testutil github.com/prometheus/client_golang/prometheus/testutil/promlint -# 
github.com/prometheus/client_model v0.4.0 +# github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 ## explicit; go 1.18 github.com/prometheus/client_model/go # github.com/prometheus/common v0.44.0 @@ -1251,7 +1260,7 @@ github.com/prometheus/common/sigv4 # github.com/prometheus/exporter-toolkit v0.10.1-0.20230714054209-2f4150c63f97 ## explicit; go 1.18 github.com/prometheus/exporter-toolkit/web -# github.com/prometheus/procfs v0.11.0 +# github.com/prometheus/procfs v0.11.1 ## explicit; go 1.19 github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs @@ -1392,8 +1401,8 @@ github.com/stretchr/testify/assert github.com/stretchr/testify/mock github.com/stretchr/testify/require github.com/stretchr/testify/suite -# github.com/thanos-io/objstore v0.0.0-20230829152104-1b257a36f9a3 -## explicit; go 1.18 +# github.com/thanos-io/objstore v0.0.0-20240309075357-e8336a5fd5f3 +## explicit; go 1.20 github.com/thanos-io/objstore github.com/thanos-io/objstore/exthttp github.com/thanos-io/objstore/providers/azure @@ -1576,7 +1585,7 @@ go.uber.org/zap/zapgrpc # go4.org/netipx v0.0.0-20230125063823-8449b0a6169f ## explicit; go 1.18 go4.org/netipx -# golang.org/x/crypto v0.17.0 +# golang.org/x/crypto v0.19.0 ## explicit; go 1.18 golang.org/x/crypto/argon2 golang.org/x/crypto/bcrypt @@ -1602,8 +1611,8 @@ golang.org/x/exp/slices # golang.org/x/mod v0.12.0 ## explicit; go 1.17 golang.org/x/mod/semver -# golang.org/x/net v0.17.0 -## explicit; go 1.17 +# golang.org/x/net v0.21.0 +## explicit; go 1.18 golang.org/x/net/bpf golang.org/x/net/context golang.org/x/net/context/ctxhttp @@ -1622,22 +1631,24 @@ golang.org/x/net/netutil golang.org/x/net/proxy golang.org/x/net/publicsuffix golang.org/x/net/trace -# golang.org/x/oauth2 v0.11.0 +# golang.org/x/oauth2 v0.14.0 ## explicit; go 1.18 golang.org/x/oauth2 golang.org/x/oauth2/authhandler golang.org/x/oauth2/clientcredentials golang.org/x/oauth2/google golang.org/x/oauth2/google/internal/externalaccount +golang.org/x/oauth2/google/internal/externalaccountauthorizeduser +golang.org/x/oauth2/google/internal/stsexchange golang.org/x/oauth2/internal golang.org/x/oauth2/jws golang.org/x/oauth2/jwt -# golang.org/x/sync v0.3.0 -## explicit; go 1.17 +# golang.org/x/sync v0.5.0 +## explicit; go 1.18 golang.org/x/sync/errgroup golang.org/x/sync/semaphore golang.org/x/sync/singleflight -# golang.org/x/sys v0.15.0 +# golang.org/x/sys v0.17.0 ## explicit; go 1.18 golang.org/x/sys/cpu golang.org/x/sys/execabs @@ -1646,7 +1657,7 @@ golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry golang.org/x/sys/windows/svc/eventlog -# golang.org/x/term v0.15.0 +# golang.org/x/term v0.17.0 ## explicit; go 1.18 golang.org/x/term # golang.org/x/text v0.14.0 @@ -1673,8 +1684,8 @@ golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm -# golang.org/x/time v0.3.0 -## explicit +# golang.org/x/time v0.4.0 +## explicit; go 1.18 golang.org/x/time/rate # golang.org/x/tools v0.11.0 ## explicit; go 1.18 @@ -1694,11 +1705,11 @@ golang.org/x/tools/internal/pkgbits golang.org/x/tools/internal/tokeninternal golang.org/x/tools/internal/typeparams golang.org/x/tools/internal/typesinternal -# golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 -## explicit; go 1.17 +# golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 +## explicit; go 1.18 golang.org/x/xerrors golang.org/x/xerrors/internal -# google.golang.org/api v0.132.0 +# google.golang.org/api v0.150.0 ## explicit; go 1.19 
google.golang.org/api/cloudresourcemanager/v1 google.golang.org/api/compute/v1 @@ -1719,7 +1730,7 @@ google.golang.org/api/transport google.golang.org/api/transport/grpc google.golang.org/api/transport/http google.golang.org/api/transport/http/internal/propagation -# google.golang.org/appengine v1.6.7 +# google.golang.org/appengine v1.6.8 ## explicit; go 1.11 google.golang.org/appengine google.golang.org/appengine/internal @@ -1729,11 +1740,9 @@ google.golang.org/appengine/internal/datastore google.golang.org/appengine/internal/log google.golang.org/appengine/internal/modules google.golang.org/appengine/internal/remote_api -google.golang.org/appengine/internal/socket google.golang.org/appengine/internal/urlfetch -google.golang.org/appengine/socket google.golang.org/appengine/urlfetch -# google.golang.org/genproto v0.0.0-20230913181813-007df8e322eb +# google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 ## explicit; go 1.19 google.golang.org/genproto/googleapis/bigtable/admin/v2 google.golang.org/genproto/googleapis/bigtable/v2 @@ -1743,12 +1752,12 @@ google.golang.org/genproto/googleapis/type/date google.golang.org/genproto/googleapis/type/expr google.golang.org/genproto/internal google.golang.org/genproto/protobuf/field_mask -# google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d +# google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 ## explicit; go 1.19 google.golang.org/genproto/googleapis/api google.golang.org/genproto/googleapis/api/annotations google.golang.org/genproto/googleapis/api/expr/v1alpha1 -# google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 +# google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17 ## explicit; go 1.19 google.golang.org/genproto/googleapis/rpc/code google.golang.org/genproto/googleapis/rpc/errdetails From 3a2fe9ed800c31899aeff6a6d3b69d83494f363f Mon Sep 17 00:00:00 2001 From: Joao Marcal Date: Fri, 5 Jul 2024 16:08:01 +0100 Subject: [PATCH 09/38] Revert func signature of NewObjectClient and introduce NewObjectClientV2 --- pkg/bloombuild/common/tsdb.go | 10 ++- pkg/bloomcompactor/tsdb.go | 10 ++- pkg/logcli/query/query.go | 15 ++-- pkg/loki/modules.go | 22 +++++- pkg/storage/factory.go | 71 +++++++++++++++---- pkg/storage/factory_test.go | 9 ++- pkg/storage/store.go | 9 ++- .../stores/shipper/bloomshipper/store.go | 8 ++- .../boltdb/compactor/util_test.go | 5 +- pkg/tool/audit/audit.go | 10 ++- tools/tsdb/index-analyzer/main.go | 8 ++- tools/tsdb/migrate-versions/main.go | 9 ++- tools/tsdb/migrate-versions/main_test.go | 3 +- 13 files changed, 146 insertions(+), 43 deletions(-) diff --git a/pkg/bloombuild/common/tsdb.go b/pkg/bloombuild/common/tsdb.go index 233593cf5e908..acb80fdff553d 100644 --- a/pkg/bloombuild/common/tsdb.go +++ b/pkg/bloombuild/common/tsdb.go @@ -19,6 +19,7 @@ import ( "github.com/grafana/loki/v3/pkg/chunkenc" baseStore "github.com/grafana/loki/v3/pkg/storage" v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1" + "github.com/grafana/loki/v3/pkg/storage/chunk/client" "github.com/grafana/loki/v3/pkg/storage/config" "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/storage" "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb" @@ -191,8 +192,13 @@ func NewTSDBStores( for i, cfg := range schemaCfg.Configs { if cfg.IndexType == types.TSDBType { - - c, err := baseStore.NewObjectClient(component, cfg.ObjectType, storeCfg, clientMetrics, reg) + var c client.ObjectClient + var err error + if 
storeCfg.ThanosObjStore { + c, err = baseStore.NewObjectClientV2(component, cfg.ObjectType, storeCfg, clientMetrics, reg) + } else { + c, err = baseStore.NewObjectClient(cfg.ObjectType, storeCfg, clientMetrics) + } if err != nil { return nil, errors.Wrap(err, "failed to create object client") } diff --git a/pkg/bloomcompactor/tsdb.go b/pkg/bloomcompactor/tsdb.go index eb1bfee134107..26b5941689c72 100644 --- a/pkg/bloomcompactor/tsdb.go +++ b/pkg/bloomcompactor/tsdb.go @@ -18,6 +18,7 @@ import ( "github.com/grafana/loki/v3/pkg/chunkenc" baseStore "github.com/grafana/loki/v3/pkg/storage" v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1" + "github.com/grafana/loki/v3/pkg/storage/chunk/client" "github.com/grafana/loki/v3/pkg/storage/config" "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/storage" "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb" @@ -188,8 +189,13 @@ func NewTSDBStores( for i, cfg := range schemaCfg.Configs { if cfg.IndexType == types.TSDBType { - - c, err := baseStore.NewObjectClient("tsdb-store", cfg.ObjectType, storeCfg, clientMetrics, prometheus.DefaultRegisterer) + var c client.ObjectClient + var err error + if storeCfg.ThanosObjStore { + c, err = baseStore.NewObjectClientV2("tsdb-store", cfg.ObjectType, storeCfg, clientMetrics, prometheus.DefaultRegisterer) + } else { + c, err = baseStore.NewObjectClient(cfg.ObjectType, storeCfg, clientMetrics) + } if err != nil { return nil, errors.Wrap(err, "failed to create object client") } diff --git a/pkg/logcli/query/query.go b/pkg/logcli/query/query.go index f63ad632455e9..26f40414395b8 100644 --- a/pkg/logcli/query/query.go +++ b/pkg/logcli/query/query.go @@ -538,16 +538,17 @@ func (q *Query) DoLocalQuery(out output.LogOutput, statistics bool, orgID string } func GetObjectClient(store string, conf loki.Config, cm storage.ClientMetrics) (chunk.ObjectClient, error) { - oc, err := storage.NewObjectClient( - "log-cli-query", - store, - conf.StorageConfig, - cm, - prometheus.DefaultRegisterer) + var c chunk.ObjectClient + var err error + if conf.StorageConfig.ThanosObjStore { + c, err = storage.NewObjectClientV2("log-cli-query", store, conf.StorageConfig, cm, prometheus.DefaultRegisterer) + } else { + c, err = storage.NewObjectClient(store, conf.StorageConfig, cm) + } if err != nil { return nil, err } - return oc, nil + return c, nil } var errNotExists = stdErrors.New("doesn't exist") diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go index 91f8f88098900..aac10c58e3728 100644 --- a/pkg/loki/modules.go +++ b/pkg/loki/modules.go @@ -1365,7 +1365,13 @@ func (t *Loki) initCompactor() (services.Service, error) { continue } - objectClient, err := storage.NewObjectClient("compactor", periodConfig.ObjectType, t.Cfg.StorageConfig, t.ClientMetrics, prometheus.DefaultRegisterer) + var objectClient client.ObjectClient + var err error + if t.Cfg.StorageConfig.ThanosObjStore { + objectClient, err = storage.NewObjectClientV2("compactor", periodConfig.ObjectType, t.Cfg.StorageConfig, t.ClientMetrics, prometheus.DefaultRegisterer) + } else { + objectClient, err = storage.NewObjectClient(periodConfig.ObjectType, t.Cfg.StorageConfig, t.ClientMetrics) + } if err != nil { return nil, fmt.Errorf("failed to create object client: %w", err) } @@ -1376,7 +1382,12 @@ func (t *Loki) initCompactor() (services.Service, error) { var deleteRequestStoreClient client.ObjectClient if t.Cfg.CompactorConfig.RetentionEnabled { if deleteStore := t.Cfg.CompactorConfig.DeleteRequestStore; deleteStore != "" { - if 
deleteRequestStoreClient, err = storage.NewObjectClient("compactor", deleteStore, t.Cfg.StorageConfig, t.ClientMetrics, prometheus.DefaultRegisterer); err != nil { + if t.Cfg.StorageConfig.ThanosObjStore { + deleteRequestStoreClient, err = storage.NewObjectClientV2("compactor", deleteStore, t.Cfg.StorageConfig, t.ClientMetrics, prometheus.DefaultRegisterer) + } else { + deleteRequestStoreClient, err = storage.NewObjectClient(deleteStore, t.Cfg.StorageConfig, t.ClientMetrics) + } + if err != nil { return nil, fmt.Errorf("failed to create delete request store object client: %w", err) } } else { @@ -1713,7 +1724,12 @@ func (t *Loki) initAnalytics() (services.Service, error) { return nil, err } - objectClient, err := storage.NewObjectClient("analytics", period.ObjectType, t.Cfg.StorageConfig, t.ClientMetrics, prometheus.DefaultRegisterer) + var objectClient client.ObjectClient + if t.Cfg.StorageConfig.ThanosObjStore { + objectClient, err = storage.NewObjectClientV2("analytics", period.ObjectType, t.Cfg.StorageConfig, t.ClientMetrics, prometheus.DefaultRegisterer) + } else { + objectClient, err = storage.NewObjectClient(period.ObjectType, t.Cfg.StorageConfig, t.ClientMetrics) + } if err != nil { level.Info(util_log.Logger).Log("msg", "failed to initialize usage report", "err", err) return nil, nil diff --git a/pkg/storage/factory.go b/pkg/storage/factory.go index 8f4c31f5b5723..31664c4a48152 100644 --- a/pkg/storage/factory.go +++ b/pkg/storage/factory.go @@ -404,7 +404,14 @@ func NewIndexClient(component string, periodCfg config.PeriodConfig, tableRange return client, nil } - objectClient, err := NewObjectClient(component, periodCfg.ObjectType, cfg, cm, registerer) + var objectClient client.ObjectClient + var err error + if cfg.ThanosObjStore { + objectClient, err = NewObjectClientV2(component, periodCfg.ObjectType, cfg, cm, registerer) + } else { + registerer = prometheus.WrapRegistererWith(prometheus.Labels{"component": component}, registerer) + objectClient, err = NewObjectClient(periodCfg.ObjectType, cfg, cm) + } if err != nil { return nil, err } @@ -478,7 +485,13 @@ func NewChunkClient(component, name string, cfg Config, schemaCfg config.SchemaC case util.StringsContain(types.TestingStorageTypes, storeType): switch storeType { case types.StorageTypeInMemory: - c, err := NewObjectClient(component, name, cfg, clientMetrics, registerer) + var c client.ObjectClient + var err error + if cfg.ThanosObjStore { + c, err = NewObjectClientV2(component, name, cfg, clientMetrics, registerer) + } else { + c, err = NewObjectClient(name, cfg, clientMetrics) + } if err != nil { return nil, err } @@ -488,21 +501,39 @@ func NewChunkClient(component, name string, cfg Config, schemaCfg config.SchemaC case util.StringsContain(types.SupportedStorageTypes, storeType): switch storeType { case types.StorageTypeFileSystem: - c, err := NewObjectClient(component, name, cfg, clientMetrics, registerer) + var c client.ObjectClient + var err error + if cfg.ThanosObjStore { + c, err = NewObjectClientV2(component, name, cfg, clientMetrics, registerer) + } else { + c, err = NewObjectClient(name, cfg, clientMetrics) + } if err != nil { return nil, err } return client.NewClientWithMaxParallel(c, client.FSEncoder, cfg.MaxParallelGetChunk, schemaCfg), nil case types.StorageTypeAWS, types.StorageTypeS3, types.StorageTypeAzure, types.StorageTypeBOS, types.StorageTypeSwift, types.StorageTypeCOS, types.StorageTypeAlibabaCloud: - c, err := NewObjectClient(component, name, cfg, clientMetrics, registerer) + var c client.ObjectClient + 
var err error + if cfg.ThanosObjStore { + c, err = NewObjectClientV2(component, name, cfg, clientMetrics, registerer) + } else { + c, err = NewObjectClient(name, cfg, clientMetrics) + } if err != nil { return nil, err } return client.NewClientWithMaxParallel(c, nil, cfg.MaxParallelGetChunk, schemaCfg), nil case types.StorageTypeGCS: - c, err := NewObjectClient(component, name, cfg, clientMetrics, registerer) + var c client.ObjectClient + var err error + if cfg.ThanosObjStore { + c, err = NewObjectClientV2(component, name, cfg, clientMetrics, registerer) + } else { + c, err = NewObjectClient(name, cfg, clientMetrics) + } if err != nil { return nil, err } @@ -552,7 +583,13 @@ func NewTableClient(component, name string, periodCfg config.PeriodConfig, cfg C } case util.StringsContain(types.SupportedIndexTypes, name): - objectClient, err := NewObjectClient(component, periodCfg.ObjectType, cfg, cm, registerer) + var objectClient client.ObjectClient + var err error + if cfg.ThanosObjStore { + objectClient, err = NewObjectClientV2(component, periodCfg.ObjectType, cfg, cm, registerer) + } else { + objectClient, err = NewObjectClient(periodCfg.ObjectType, cfg, cm) + } if err != nil { return nil, err } @@ -607,7 +644,22 @@ func (c *ClientMetrics) Unregister() { } // NewObjectClient makes a new StorageClient with the prefix in the front. -func NewObjectClient(component, name string, cfg Config, clientMetrics ClientMetrics, reg prometheus.Registerer) (client.ObjectClient, error) { +func NewObjectClient(name string, cfg Config, clientMetrics ClientMetrics) (client.ObjectClient, error) { + actual, err := internalNewObjectClient("", name, cfg, clientMetrics, prometheus.DefaultRegisterer) + if err != nil { + return nil, err + } + + if cfg.ObjectPrefix == "" { + return actual, nil + } else { + prefix := strings.Trim(cfg.ObjectPrefix, "/") + "/" + return client.NewPrefixedObjectClient(actual, prefix), nil + } +} + +// NewObjectClient makes a new StorageClient with the prefix in the front. 
+func NewObjectClientV2(component, name string, cfg Config, clientMetrics ClientMetrics, reg prometheus.Registerer) (client.ObjectClient, error) { actual, err := internalNewObjectClient(component, name, cfg, clientMetrics, reg) if err != nil { return nil, err @@ -628,11 +680,6 @@ func internalNewObjectClient(component, name string, cfg Config, clientMetrics C storeType = name ) - // preserve olf reg behaviour - if !cfg.ThanosObjStore { - reg = prometheus.WrapRegistererWith(prometheus.Labels{"component": component}, reg) - } - // lookup storeType for named stores if nsType, ok := cfg.NamedStores.storeType[name]; ok { storeType = nsType diff --git a/pkg/storage/factory_test.go b/pkg/storage/factory_test.go index fc668c928ae36..3ac8658e8d6b2 100644 --- a/pkg/storage/factory_test.go +++ b/pkg/storage/factory_test.go @@ -8,7 +8,6 @@ import ( "github.com/go-kit/log" "github.com/grafana/dskit/flagext" - "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -235,7 +234,7 @@ func TestNewObjectClient_prefixing(t *testing.T) { var cfg Config flagext.DefaultValues(&cfg) - objectClient, err := NewObjectClient("inmemory", "inmemory", cfg, cm, prometheus.NewRegistry()) + objectClient, err := NewObjectClient("inmemory", cfg, cm) require.NoError(t, err) _, ok := objectClient.(client.PrefixedObjectClient) @@ -247,7 +246,7 @@ func TestNewObjectClient_prefixing(t *testing.T) { flagext.DefaultValues(&cfg) cfg.ObjectPrefix = "my/prefix/" - objectClient, err := NewObjectClient("inmemory", "inmemory", cfg, cm, prometheus.NewRegistry()) + objectClient, err := NewObjectClient("inmemory", cfg, cm) require.NoError(t, err) prefixed, ok := objectClient.(client.PrefixedObjectClient) @@ -260,7 +259,7 @@ func TestNewObjectClient_prefixing(t *testing.T) { flagext.DefaultValues(&cfg) cfg.ObjectPrefix = "my/prefix" - objectClient, err := NewObjectClient("inmemory", "inmemory", cfg, cm, prometheus.NewRegistry()) + objectClient, err := NewObjectClient("inmemory", cfg, cm) require.NoError(t, err) prefixed, ok := objectClient.(client.PrefixedObjectClient) @@ -273,7 +272,7 @@ func TestNewObjectClient_prefixing(t *testing.T) { flagext.DefaultValues(&cfg) cfg.ObjectPrefix = "/my/prefix/" - objectClient, err := NewObjectClient("inmemory", "inmemory", cfg, cm, prometheus.NewRegistry()) + objectClient, err := NewObjectClient("inmemory", cfg, cm) require.NoError(t, err) prefixed, ok := objectClient.(client.PrefixedObjectClient) diff --git a/pkg/storage/store.go b/pkg/storage/store.go index cde9936ae90aa..e1c0efeb0fc55 100644 --- a/pkg/storage/store.go +++ b/pkg/storage/store.go @@ -229,6 +229,7 @@ func (s *LokiStore) chunkClientForPeriod(p config.PeriodConfig) (client.Client, if objectStoreType == "" { objectStoreType = p.IndexType } + var cc congestion.Controller ccCfg := s.cfg.CongestionControl @@ -283,7 +284,13 @@ func (s *LokiStore) storeForPeriod(p config.PeriodConfig, tableRange config.Tabl }, nil } - objectClient, err := NewObjectClient(component, p.ObjectType, s.cfg, s.clientMetrics, s.registerer) + var objectClient client.ObjectClient + var err error + if s.cfg.ThanosObjStore { + objectClient, err = NewObjectClientV2(component, p.ObjectType, s.cfg, s.clientMetrics, s.registerer) + } else { + objectClient, err = NewObjectClient(p.ObjectType, s.cfg, s.clientMetrics) + } if err != nil { return nil, nil, nil, err } diff --git a/pkg/storage/stores/shipper/bloomshipper/store.go 
b/pkg/storage/stores/shipper/bloomshipper/store.go index 8b25d8f90b907..69cf552c419e9 100644 --- a/pkg/storage/stores/shipper/bloomshipper/store.go +++ b/pkg/storage/stores/shipper/bloomshipper/store.go @@ -334,7 +334,13 @@ func NewBloomStore( } for _, periodicConfig := range periodicConfigs { - objectClient, err := storage.NewObjectClient("bloom-store", periodicConfig.ObjectType, storageConfig, clientMetrics, prometheus.DefaultRegisterer) + var objectClient client.ObjectClient + var err error + if storageConfig.ThanosObjStore { + objectClient, err = storage.NewObjectClientV2("bloom-store", periodicConfig.ObjectType, storageConfig, clientMetrics, prometheus.DefaultRegisterer) + } else { + objectClient, err = storage.NewObjectClient(periodicConfig.ObjectType, storageConfig, clientMetrics) + } if err != nil { return nil, errors.Wrapf(err, "creating object client for period %s", periodicConfig.From) } diff --git a/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/util_test.go b/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/util_test.go index 82eaed6043b42..38bd567ff4a3a 100644 --- a/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/util_test.go +++ b/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/util_test.go @@ -10,7 +10,6 @@ import ( ww "github.com/grafana/dskit/server" "github.com/grafana/dskit/user" - "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "github.com/stretchr/testify/require" @@ -129,11 +128,11 @@ type testObjectClient struct { } func newTestObjectClient(path string, clientMetrics storage.ClientMetrics) client.ObjectClient { - c, err := storage.NewObjectClient("compactor", "filesystem", storage.Config{ + c, err := storage.NewObjectClient("filesystem", storage.Config{ FSConfig: local.FSConfig{ Directory: path, }, - }, clientMetrics, prometheus.NewRegistry()) + }, clientMetrics) if err != nil { panic(err) } diff --git a/pkg/tool/audit/audit.go b/pkg/tool/audit/audit.go index fad5295690071..ddaceedef77cf 100644 --- a/pkg/tool/audit/audit.go +++ b/pkg/tool/audit/audit.go @@ -18,7 +18,6 @@ import ( "github.com/grafana/loki/v3/pkg/compactor" "github.com/grafana/loki/v3/pkg/compactor/retention" "github.com/grafana/loki/v3/pkg/storage" - loki_storage "github.com/grafana/loki/v3/pkg/storage" "github.com/grafana/loki/v3/pkg/storage/chunk/client" indexshipper_storage "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/storage" shipperutil "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/storage" @@ -54,8 +53,13 @@ func Run(ctx context.Context, cloudIndexPath, table string, cfg Config, logger l func GetObjectClient(cfg Config) (client.ObjectClient, error) { periodCfg := cfg.SchemaConfig.Configs[len(cfg.SchemaConfig.Configs)-1] // only check the last period. 
- - objClient, err := loki_storage.NewObjectClient("audit", periodCfg.ObjectType, cfg.StorageConfig, storage.NewClientMetrics(), prometheus.DefaultRegisterer) + var objClient client.ObjectClient + var err error + if cfg.StorageConfig.ThanosObjStore { + objClient, err = storage.NewObjectClientV2("audit", periodCfg.ObjectType, cfg.StorageConfig, storage.NewClientMetrics(), prometheus.DefaultRegisterer) + } else { + objClient, err = storage.NewObjectClient(periodCfg.ObjectType, cfg.StorageConfig, storage.NewClientMetrics()) + } if err != nil { return nil, fmt.Errorf("couldn't create object client: %w", err) } diff --git a/tools/tsdb/index-analyzer/main.go b/tools/tsdb/index-analyzer/main.go index a87ab60cbbcbf..aa1af033703eb 100644 --- a/tools/tsdb/index-analyzer/main.go +++ b/tools/tsdb/index-analyzer/main.go @@ -6,6 +6,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/grafana/loki/v3/pkg/storage" + "github.com/grafana/loki/v3/pkg/storage/chunk/client" "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper" "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb" util_log "github.com/grafana/loki/v3/pkg/util/log" @@ -24,7 +25,12 @@ func main() { periodCfg, tableRange, tableName, err := helpers.GetPeriodConfigForTableNumber(bucket, conf.SchemaConfig.Configs) helpers.ExitErr("find period config for bucket", err) - objectClient, err := storage.NewObjectClient("index-analyzer", periodCfg.ObjectType, conf.StorageConfig, clientMetrics, prometheus.DefaultRegisterer) + var objectClient client.ObjectClient + if conf.StorageConfig.ThanosObjStore { + objectClient, err = storage.NewObjectClientV2("index-analyzer", periodCfg.ObjectType, conf.StorageConfig, clientMetrics, prometheus.DefaultRegisterer) + } else { + objectClient, err = storage.NewObjectClient(periodCfg.ObjectType, conf.StorageConfig, clientMetrics) + } helpers.ExitErr("creating object client", err) shipper, err := indexshipper.NewIndexShipper( diff --git a/tools/tsdb/migrate-versions/main.go b/tools/tsdb/migrate-versions/main.go index 01627d92fa1d6..3b4e3e2598a4b 100644 --- a/tools/tsdb/migrate-versions/main.go +++ b/tools/tsdb/migrate-versions/main.go @@ -20,6 +20,7 @@ import ( "github.com/grafana/loki/v3/pkg/chunkenc" "github.com/grafana/loki/v3/pkg/loki" "github.com/grafana/loki/v3/pkg/storage" + "github.com/grafana/loki/v3/pkg/storage/chunk/client" "github.com/grafana/loki/v3/pkg/storage/chunk/client/util" "github.com/grafana/loki/v3/pkg/storage/config" shipperindex "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/index" @@ -98,7 +99,13 @@ func main() { } func migrateTables(pCfg config.PeriodConfig, storageCfg storage.Config, clientMetrics storage.ClientMetrics, tableRange config.TableRange) error { - objClient, err := storage.NewObjectClient("tables-migration-tool", pCfg.ObjectType, storageCfg, clientMetrics, prometheus.DefaultRegisterer) + var objClient client.ObjectClient + var err error + if storageCfg.ThanosObjStore { + objClient, err = storage.NewObjectClientV2("tables-migration-tool", pCfg.ObjectType, storageCfg, clientMetrics, prometheus.DefaultRegisterer) + } else { + objClient, err = storage.NewObjectClient(pCfg.ObjectType, storageCfg, clientMetrics) + } if err != nil { return err } diff --git a/tools/tsdb/migrate-versions/main_test.go b/tools/tsdb/migrate-versions/main_test.go index 06d03530bab5c..62519e04f61fc 100644 --- a/tools/tsdb/migrate-versions/main_test.go +++ b/tools/tsdb/migrate-versions/main_test.go @@ -10,7 +10,6 @@ import ( "testing" "time" 
- "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "github.com/stretchr/testify/require" @@ -53,7 +52,7 @@ func TestMigrateTables(t *testing.T) { } clientMetrics := storage.NewClientMetrics() - objClient, err := storage.NewObjectClient("tables-migration-tool", pcfg.ObjectType, storageCfg, clientMetrics, prometheus.DefaultRegisterer) + objClient, err := storage.NewObjectClient(pcfg.ObjectType, storageCfg, clientMetrics) require.NoError(t, err) indexStorageClient := shipperstorage.NewIndexStorageClient(objClient, pcfg.IndexTables.PathPrefix) From 4451f23c1da699edc6c87e84f9f570f212eb5a19 Mon Sep 17 00:00:00 2001 From: Kaviraj Date: Fri, 12 Jul 2024 13:45:27 +0200 Subject: [PATCH 10/38] fix the putObject api arguments Signed-off-by: Kaviraj --- pkg/storage/chunk/client/gcp/gcs_thanos_object_client.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/storage/chunk/client/gcp/gcs_thanos_object_client.go b/pkg/storage/chunk/client/gcp/gcs_thanos_object_client.go index 2c78748107025..b4e60ef2d7124 100644 --- a/pkg/storage/chunk/client/gcp/gcs_thanos_object_client.go +++ b/pkg/storage/chunk/client/gcp/gcs_thanos_object_client.go @@ -60,7 +60,7 @@ func (s *GCSThanosObjectClient) ObjectExists(ctx context.Context, objectKey stri } // PutObject puts the specified bytes into the configured GCS bucket at the provided key -func (s *GCSThanosObjectClient) PutObject(ctx context.Context, objectKey string, object io.ReadSeeker) error { +func (s *GCSThanosObjectClient) PutObject(ctx context.Context, objectKey string, object io.Reader) error { return s.client.Upload(ctx, objectKey, object) } From f051abf19aef797f8d833cadce90b2e34495f40e Mon Sep 17 00:00:00 2001 From: Kaviraj Kanagaraj Date: Tue, 23 Jul 2024 07:15:53 +0200 Subject: [PATCH 11/38] vendor thanos with chunk-size patch Signed-off-by: Kaviraj Kanagaraj --- go.mod | 28 +- go.sum | 64 +- .../.release-please-manifest-individual.json | 10 +- .../.release-please-manifest-submodules.json | 271 +- .../go/.release-please-manifest.json | 2 +- vendor/cloud.google.com/go/CHANGES.md | 12 + vendor/cloud.google.com/go/CONTRIBUTING.md | 17 + .../go/compute/internal/version.go | 2 +- vendor/cloud.google.com/go/debug.md | 17 +- vendor/cloud.google.com/go/doc.go | 2 +- vendor/cloud.google.com/go/iam/CHANGES.md | 7 + .../go/iam/apiv1/iampb/iam_policy.pb.go | 2 +- .../go/iam/apiv1/iampb/options.pb.go | 2 +- .../go/iam/apiv1/iampb/policy.pb.go | 2 +- .../go/internal/.repo-metadata-full.json | 60 + .../go/internal/trace/trace.go | 23 +- ...elease-please-config-yoshi-submodules.json | 15 + vendor/cloud.google.com/go/storage/CHANGES.md | 65 + vendor/cloud.google.com/go/storage/bucket.go | 30 +- vendor/cloud.google.com/go/storage/client.go | 3 + vendor/cloud.google.com/go/storage/doc.go | 9 +- .../go/storage/grpc_client.go | 434 +- .../go/storage/http_client.go | 44 +- .../go/storage/internal/apiv2/auxiliary.go | 2 +- .../go/storage/internal/apiv2/doc.go | 10 +- .../storage/internal/apiv2/storage_client.go | 20 +- .../internal/apiv2/storagepb/storage.pb.go | 2766 +- .../go/storage/internal/version.go | 2 +- vendor/cloud.google.com/go/storage/invoke.go | 3 + vendor/cloud.google.com/go/storage/reader.go | 26 +- vendor/cloud.google.com/go/storage/storage.go | 49 +- vendor/cloud.google.com/go/storage/writer.go | 30 +- vendor/github.com/goccy/go-json/.codecov.yml | 32 + vendor/github.com/goccy/go-json/.gitignore | 2 + vendor/github.com/goccy/go-json/.golangci.yml | 86 + 
vendor/github.com/goccy/go-json/CHANGELOG.md | 425 + vendor/github.com/goccy/go-json/LICENSE | 21 + vendor/github.com/goccy/go-json/Makefile | 39 + vendor/github.com/goccy/go-json/README.md | 529 + vendor/github.com/goccy/go-json/color.go | 68 + vendor/github.com/goccy/go-json/decode.go | 263 + .../goccy/go-json/docker-compose.yml | 13 + vendor/github.com/goccy/go-json/encode.go | 326 + vendor/github.com/goccy/go-json/error.go | 41 + .../internal/decoder/anonymous_field.go | 41 + .../goccy/go-json/internal/decoder/array.go | 176 + .../goccy/go-json/internal/decoder/assign.go | 438 + .../goccy/go-json/internal/decoder/bool.go | 83 + .../goccy/go-json/internal/decoder/bytes.go | 118 + .../goccy/go-json/internal/decoder/compile.go | 487 + .../internal/decoder/compile_norace.go | 29 + .../go-json/internal/decoder/compile_race.go | 37 + .../goccy/go-json/internal/decoder/context.go | 254 + .../goccy/go-json/internal/decoder/float.go | 170 + .../goccy/go-json/internal/decoder/func.go | 146 + .../goccy/go-json/internal/decoder/int.go | 246 + .../go-json/internal/decoder/interface.go | 528 + .../goccy/go-json/internal/decoder/invalid.go | 55 + .../goccy/go-json/internal/decoder/map.go | 280 + .../goccy/go-json/internal/decoder/number.go | 123 + .../goccy/go-json/internal/decoder/option.go | 17 + .../goccy/go-json/internal/decoder/path.go | 670 + .../goccy/go-json/internal/decoder/ptr.go | 97 + .../goccy/go-json/internal/decoder/slice.go | 380 + .../goccy/go-json/internal/decoder/stream.go | 556 + .../goccy/go-json/internal/decoder/string.go | 452 + .../goccy/go-json/internal/decoder/struct.go | 845 + .../goccy/go-json/internal/decoder/type.go | 30 + .../goccy/go-json/internal/decoder/uint.go | 194 + .../internal/decoder/unmarshal_json.go | 104 + .../internal/decoder/unmarshal_text.go | 285 + .../internal/decoder/wrapped_string.go | 73 + .../goccy/go-json/internal/encoder/code.go | 1023 + .../goccy/go-json/internal/encoder/compact.go | 286 + .../go-json/internal/encoder/compiler.go | 935 + .../internal/encoder/compiler_norace.go | 32 + .../go-json/internal/encoder/compiler_race.go | 45 + .../goccy/go-json/internal/encoder/context.go | 105 + .../go-json/internal/encoder/decode_rune.go | 126 + .../goccy/go-json/internal/encoder/encoder.go | 596 + .../goccy/go-json/internal/encoder/indent.go | 211 + .../goccy/go-json/internal/encoder/int.go | 176 + .../goccy/go-json/internal/encoder/map112.go | 9 + .../goccy/go-json/internal/encoder/map113.go | 9 + .../goccy/go-json/internal/encoder/opcode.go | 752 + .../goccy/go-json/internal/encoder/option.go | 48 + .../goccy/go-json/internal/encoder/optype.go | 932 + .../goccy/go-json/internal/encoder/query.go | 135 + .../goccy/go-json/internal/encoder/string.go | 483 + .../go-json/internal/encoder/string_table.go | 415 + .../go-json/internal/encoder/vm/debug_vm.go | 41 + .../goccy/go-json/internal/encoder/vm/hack.go | 9 + .../goccy/go-json/internal/encoder/vm/util.go | 207 + .../goccy/go-json/internal/encoder/vm/vm.go | 4859 + .../internal/encoder/vm_color/debug_vm.go | 35 + .../go-json/internal/encoder/vm_color/hack.go | 9 + .../go-json/internal/encoder/vm_color/util.go | 274 + .../go-json/internal/encoder/vm_color/vm.go | 4859 + .../encoder/vm_color_indent/debug_vm.go | 35 + .../internal/encoder/vm_color_indent/util.go | 297 + .../internal/encoder/vm_color_indent/vm.go | 4859 + .../internal/encoder/vm_indent/debug_vm.go | 35 + .../internal/encoder/vm_indent/hack.go | 9 + .../internal/encoder/vm_indent/util.go | 230 + .../go-json/internal/encoder/vm_indent/vm.go 
| 4859 + .../goccy/go-json/internal/errors/error.go | 183 + .../goccy/go-json/internal/runtime/rtype.go | 262 + .../go-json/internal/runtime/struct_field.go | 91 + .../goccy/go-json/internal/runtime/type.go | 100 + vendor/github.com/goccy/go-json/json.go | 368 + vendor/github.com/goccy/go-json/option.go | 79 + vendor/github.com/goccy/go-json/path.go | 84 + vendor/github.com/goccy/go-json/query.go | 47 + .../gax-go/v2/.release-please-manifest.json | 2 +- .../googleapis/gax-go/v2/CHANGES.md | 7 + .../googleapis/gax-go/v2/internal/version.go | 2 +- .../github.com/klauspost/compress/README.md | 7 + .../klauspost/compress/flate/matchlen_amd64.s | 10 +- .../compress/internal/snapref/encode_other.go | 2 +- .../klauspost/compress/s2/decode_arm64.s | 2 +- .../github.com/klauspost/compress/s2/index.go | 10 +- vendor/github.com/klauspost/compress/s2/s2.go | 6 +- .../klauspost/compress/s2/writer.go | 28 +- .../klauspost/compress/zlib/reader.go | 32 +- .../klauspost/compress/zlib/writer.go | 18 +- .../klauspost/compress/zstd/blockdec.go | 3 + .../klauspost/compress/zstd/blockenc.go | 20 + .../klauspost/compress/zstd/decoder.go | 2 +- .../klauspost/compress/zstd/dict.go | 31 + .../klauspost/compress/zstd/enc_best.go | 12 + .../klauspost/compress/zstd/enc_better.go | 13 +- .../zstd/internal/xxhash/xxhash_arm64.s | 4 +- .../klauspost/compress/zstd/matchlen_amd64.s | 10 +- .../github.com/klauspost/cpuid/v2/README.md | 15 +- vendor/github.com/klauspost/cpuid/v2/cpuid.go | 459 +- .../klauspost/cpuid/v2/detect_x86.go | 2 + .../klauspost/cpuid/v2/featureid_string.go | 413 +- vendor/github.com/minio/minio-go/v7/CREDITS | 1101 + vendor/github.com/minio/minio-go/v7/README.md | 118 +- .../minio-go/v7/api-bucket-notification.go | 3 +- .../minio/minio-go/v7/api-compose-object.go | 2 +- .../minio-go/v7/api-get-object-attributes.go | 201 + .../minio/minio-go/v7/api-get-object-file.go | 2 +- .../minio/minio-go/v7/api-get-object.go | 13 +- .../minio/minio-go/v7/api-get-options.go | 8 +- .../minio/minio-go/v7/api-object-tagging.go | 38 +- .../minio/minio-go/v7/api-put-object.go | 21 +- .../minio-go/v7/api-putobject-snowball.go | 27 + .../minio/minio-go/v7/api-remove.go | 2 +- vendor/github.com/minio/minio-go/v7/api.go | 52 +- .../github.com/minio/minio-go/v7/constants.go | 20 + .../minio/minio-go/v7/functional_tests.go | 1687 +- .../v7/pkg/credentials/assume_role.go | 11 +- .../v7/pkg/credentials/credentials.go | 11 +- .../pkg/credentials/file_aws_credentials.go | 1 + .../v7/pkg/credentials/file_minio_client.go | 4 +- .../minio-go/v7/pkg/credentials/iam_aws.go | 143 +- .../v7/pkg/credentials/sts_client_grants.go | 1 + .../v7/pkg/credentials/sts_custom_identity.go | 1 + .../v7/pkg/credentials/sts_ldap_identity.go | 1 + .../v7/pkg/credentials/sts_tls_identity.go | 1 + .../v7/pkg/credentials/sts_web_identity.go | 1 + .../minio-go/v7/pkg/encrypt/server-side.go | 3 +- .../minio-go/v7/pkg/lifecycle/lifecycle.go | 75 +- .../v7/pkg/notification/notification.go | 10 + .../v7/pkg/replication/replication.go | 227 +- .../minio/minio-go/v7/pkg/s3utils/utils.go | 67 +- .../minio/minio-go/v7/pkg/set/stringset.go | 15 +- .../minio/minio-go/v7/post-policy.go | 23 + vendor/github.com/minio/minio-go/v7/retry.go | 1 + .../minio/minio-go/v7/s3-endpoints.go | 182 +- vendor/github.com/minio/minio-go/v7/utils.go | 31 +- .../github.com/minio/sha256-simd/.gitignore | 1 - vendor/github.com/minio/sha256-simd/LICENSE | 202 - vendor/github.com/minio/sha256-simd/README.md | 137 - .../minio/sha256-simd/cpuid_other.go | 50 - 
vendor/github.com/minio/sha256-simd/sha256.go | 468 - .../sha256-simd/sha256blockAvx512_amd64.asm | 686 - .../sha256-simd/sha256blockAvx512_amd64.go | 501 - .../sha256-simd/sha256blockAvx512_amd64.s | 267 - .../minio/sha256-simd/sha256block_amd64.go | 31 - .../minio/sha256-simd/sha256block_amd64.s | 266 - .../minio/sha256-simd/sha256block_arm64.go | 37 - .../minio/sha256-simd/sha256block_arm64.s | 192 - .../minio/sha256-simd/sha256block_other.go | 29 - .../minio/sha256-simd/test-architectures.sh | 15 - .../github.com/thanos-io/objstore/.go-version | 2 +- .../thanos-io/objstore/CHANGELOG.md | 7 + .../github.com/thanos-io/objstore/README.md | 6 +- .../github.com/thanos-io/objstore/objstore.go | 2 +- .../objstore/providers/azure/azure.go | 25 +- .../thanos-io/objstore/providers/gcs/gcs.go | 46 +- .../thanos-io/objstore/providers/s3/s3.go | 47 +- .../v1/cloudresourcemanager-api.json | 4 +- .../v1/cloudresourcemanager-gen.go | 2 +- .../api/compute/v1/compute-api.json | 5715 +- .../api/compute/v1/compute-gen.go | 106606 ++++++++------- .../iamcredentials/v1/iamcredentials-api.json | 4 +- .../api/internal/settings.go | 9 +- .../google.golang.org/api/internal/version.go | 2 +- .../api/storage/v1/storage-api.json | 15 +- .../api/storage/v1/storage-gen.go | 31 +- .../googleapis/api/annotations/client.pb.go | 329 +- .../api/annotations/field_info.pb.go | 8 +- vendor/modules.txt | 44 +- 205 files changed, 104676 insertions(+), 57261 deletions(-) create mode 100644 vendor/github.com/goccy/go-json/.codecov.yml create mode 100644 vendor/github.com/goccy/go-json/.gitignore create mode 100644 vendor/github.com/goccy/go-json/.golangci.yml create mode 100644 vendor/github.com/goccy/go-json/CHANGELOG.md create mode 100644 vendor/github.com/goccy/go-json/LICENSE create mode 100644 vendor/github.com/goccy/go-json/Makefile create mode 100644 vendor/github.com/goccy/go-json/README.md create mode 100644 vendor/github.com/goccy/go-json/color.go create mode 100644 vendor/github.com/goccy/go-json/decode.go create mode 100644 vendor/github.com/goccy/go-json/docker-compose.yml create mode 100644 vendor/github.com/goccy/go-json/encode.go create mode 100644 vendor/github.com/goccy/go-json/error.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/anonymous_field.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/array.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/assign.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/bool.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/bytes.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/compile.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/compile_norace.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/compile_race.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/context.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/float.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/func.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/int.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/interface.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/invalid.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/map.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/number.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/option.go create 
mode 100644 vendor/github.com/goccy/go-json/internal/decoder/path.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/ptr.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/slice.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/stream.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/string.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/struct.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/type.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/uint.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/unmarshal_json.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/unmarshal_text.go create mode 100644 vendor/github.com/goccy/go-json/internal/decoder/wrapped_string.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/code.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/compact.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/compiler.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/compiler_norace.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/compiler_race.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/context.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/decode_rune.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/encoder.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/indent.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/int.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/map112.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/map113.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/opcode.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/option.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/optype.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/query.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/string.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/string_table.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/vm/debug_vm.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/vm/hack.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/vm/util.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/vm/vm.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/vm_color/debug_vm.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/vm_color/hack.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/vm_color/util.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/vm_color/vm.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/debug_vm.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/util.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/vm.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/vm_indent/debug_vm.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/vm_indent/hack.go create mode 100644 vendor/github.com/goccy/go-json/internal/encoder/vm_indent/util.go create mode 100644 
vendor/github.com/goccy/go-json/internal/encoder/vm_indent/vm.go create mode 100644 vendor/github.com/goccy/go-json/internal/errors/error.go create mode 100644 vendor/github.com/goccy/go-json/internal/runtime/rtype.go create mode 100644 vendor/github.com/goccy/go-json/internal/runtime/struct_field.go create mode 100644 vendor/github.com/goccy/go-json/internal/runtime/type.go create mode 100644 vendor/github.com/goccy/go-json/json.go create mode 100644 vendor/github.com/goccy/go-json/option.go create mode 100644 vendor/github.com/goccy/go-json/path.go create mode 100644 vendor/github.com/goccy/go-json/query.go create mode 100644 vendor/github.com/minio/minio-go/v7/CREDITS create mode 100644 vendor/github.com/minio/minio-go/v7/api-get-object-attributes.go delete mode 100644 vendor/github.com/minio/sha256-simd/.gitignore delete mode 100644 vendor/github.com/minio/sha256-simd/LICENSE delete mode 100644 vendor/github.com/minio/sha256-simd/README.md delete mode 100644 vendor/github.com/minio/sha256-simd/cpuid_other.go delete mode 100644 vendor/github.com/minio/sha256-simd/sha256.go delete mode 100644 vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.asm delete mode 100644 vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.go delete mode 100644 vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.s delete mode 100644 vendor/github.com/minio/sha256-simd/sha256block_amd64.go delete mode 100644 vendor/github.com/minio/sha256-simd/sha256block_amd64.s delete mode 100644 vendor/github.com/minio/sha256-simd/sha256block_arm64.go delete mode 100644 vendor/github.com/minio/sha256-simd/sha256block_arm64.s delete mode 100644 vendor/github.com/minio/sha256-simd/sha256block_other.go delete mode 100644 vendor/github.com/minio/sha256-simd/test-architectures.sh diff --git a/go.mod b/go.mod index d3683035e722b..577f886ec867d 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ toolchain go1.22.4 require ( cloud.google.com/go/bigtable v1.18.1 cloud.google.com/go/pubsub v1.36.1 - cloud.google.com/go/storage v1.36.0 + cloud.google.com/go/storage v1.40.0 github.com/Azure/azure-pipeline-go v0.2.3 github.com/Azure/azure-storage-blob-go v0.14.0 github.com/Azure/go-autorest/autorest/adal v0.9.23 @@ -64,11 +64,11 @@ require ( github.com/jmespath/go-jmespath v0.4.0 github.com/joncrlsn/dque v0.0.0-20211108142734-c2ef48c5192a github.com/json-iterator/go v1.1.12 - github.com/klauspost/compress v1.17.7 + github.com/klauspost/compress v1.17.9 github.com/klauspost/pgzip v1.2.5 github.com/leodido/go-syslog/v4 v4.1.0 github.com/mattn/go-ieproxy v0.0.1 - github.com/minio/minio-go/v7 v7.0.61 + github.com/minio/minio-go/v7 v7.0.72 github.com/mitchellh/go-wordwrap v1.0.1 github.com/mitchellh/mapstructure v1.5.0 github.com/modern-go/reflect2 v1.0.2 @@ -104,7 +104,7 @@ require ( golang.org/x/sync v0.7.0 golang.org/x/sys v0.21.0 golang.org/x/time v0.5.0 - google.golang.org/api v0.168.0 + google.golang.org/api v0.172.0 google.golang.org/grpc v1.62.1 gopkg.in/alecthomas/kingpin.v2 v2.2.6 gopkg.in/yaml.v2 v2.4.0 @@ -139,7 +139,7 @@ require ( github.com/richardartoul/molecule v1.0.0 github.com/schollz/progressbar/v3 v3.14.2 github.com/shirou/gopsutil/v4 v4.24.0-alpha.1 - github.com/thanos-io/objstore v0.0.0-20240309075357-e8336a5fd5f3 + github.com/thanos-io/objstore v0.0.0-20240722162417-19b0c0f0ffd8 github.com/willf/bloom v2.0.3+incompatible go.opentelemetry.io/collector/pdata v1.3.0 go4.org/netipx v0.0.0-20230125063823-8449b0a6169f @@ -160,6 +160,7 @@ require ( github.com/dlclark/regexp2 v1.4.0 // indirect 
github.com/go-kit/kit v0.12.0 // indirect github.com/go-ole/go-ole v1.2.6 // indirect + github.com/goccy/go-json v0.10.3 // indirect github.com/hashicorp/go-msgpack/v2 v2.1.1 // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect github.com/pires/go-proxyproto v0.7.0 // indirect @@ -172,10 +173,10 @@ require ( ) require ( - cloud.google.com/go v0.112.0 // indirect - cloud.google.com/go/compute v1.23.4 // indirect + cloud.google.com/go v0.112.1 // indirect + cloud.google.com/go/compute v1.24.0 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect - cloud.google.com/go/iam v1.1.6 // indirect + cloud.google.com/go/iam v1.1.7 // indirect cloud.google.com/go/longrunning v0.5.5 // indirect github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 // indirect github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 // indirect @@ -264,7 +265,7 @@ require ( github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7 // indirect github.com/google/s2a-go v0.1.7 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect - github.com/googleapis/gax-go/v2 v2.12.2 // indirect + github.com/googleapis/gax-go/v2 v2.12.3 // indirect github.com/gophercloud/gophercloud v1.8.0 // indirect github.com/grafana/pyroscope-go/godeltaprof v0.1.6 // indirect github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect @@ -290,7 +291,7 @@ require ( github.com/josharian/intern v1.0.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/julienschmidt/httprouter v1.3.0 // indirect - github.com/klauspost/cpuid/v2 v2.2.5 // indirect + github.com/klauspost/cpuid/v2 v2.2.8 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/leodido/go-urn v1.2.1 // indirect github.com/leodido/ragel-machinery v0.0.0-20190525184631-5f46317e436b // indirect @@ -299,7 +300,6 @@ require ( github.com/mattn/go-isatty v0.0.20 // indirect github.com/miekg/dns v1.1.58 // indirect github.com/minio/md5-simd v1.1.2 // indirect - github.com/minio/sha256-simd v1.0.1 // indirect github.com/mitchellh/copystructure v1.0.0 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/reflectwalk v1.0.1 // indirect @@ -348,9 +348,9 @@ require ( golang.org/x/term v0.21.0 // indirect golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto v0.0.0-20240205150955-31a09d347014 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240304212257-790db918fca8 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240304161311-37d4d3c04a78 // indirect + google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240314234333-6e1732d8331c // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect gopkg.in/fsnotify/fsnotify.v1 v1.4.7 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect diff --git a/go.sum b/go.sum index d9df9c9cc1f9e..f583b70bcd432 100644 --- a/go.sum +++ b/go.sum @@ -34,8 +34,8 @@ cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w9 cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= -cloud.google.com/go v0.112.0 
h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM= -cloud.google.com/go v0.112.0/go.mod h1:3jEEVwZ/MHU4djK5t5RHuKOA/GbLddgTdVubX1qnPD4= +cloud.google.com/go v0.112.1 h1:uJSeirPke5UNZHIb4SxfZklVSiWWVqW4oXlETwZziwM= +cloud.google.com/go v0.112.1/go.mod h1:+Vbu+Y1UU+I1rjmzeMOb/8RfkKJK2Gyxi1X6jJCZLo4= cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= @@ -60,8 +60,8 @@ cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6m cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= -cloud.google.com/go/compute v1.23.4 h1:EBT9Nw4q3zyE7G45Wvv3MzolIrCJEuHys5muLY0wvAw= -cloud.google.com/go/compute v1.23.4/go.mod h1:/EJMj55asU6kAFnuZET8zqgwgJ9FvXWXOkkfQZa4ioI= +cloud.google.com/go/compute v1.24.0 h1:phWcR2eWzRJaL/kOiJwfFsPs4BaKq1j6vnpZrc1YlVg= +cloud.google.com/go/compute v1.24.0/go.mod h1:kw1/T+h/+tK2LJK0wiPPx1intgdAM3j/g3hFDlscY40= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= @@ -82,10 +82,10 @@ cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFP cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= -cloud.google.com/go/iam v1.1.6 h1:bEa06k05IO4f4uJonbB5iAgKTPpABy1ayxaIZV/GHVc= -cloud.google.com/go/iam v1.1.6/go.mod h1:O0zxdPeGBoFdWW3HWmBxJsk0pfvNM/p/qa82rWOGTwI= -cloud.google.com/go/kms v1.15.6 h1:ktpEMQmsOAYj3VZwH020FcQlm23BVYg8T8O1woG2GcE= -cloud.google.com/go/kms v1.15.6/go.mod h1:yF75jttnIdHfGBoE51AKsD/Yqf+/jICzB9v1s1acsms= +cloud.google.com/go/iam v1.1.7 h1:z4VHOhwKLF/+UYXAJDFwGtNF0b6gjsW1Pk9Ml0U/IoM= +cloud.google.com/go/iam v1.1.7/go.mod h1:J4PMPg8TtyurAUvSmPj8FF3EDgY1SPRZxcUGrn7WXGA= +cloud.google.com/go/kms v1.15.7 h1:7caV9K3yIxvlQPAcaFffhlT7d1qpxjB1wHBtjWa13SM= +cloud.google.com/go/kms v1.15.7/go.mod h1:ub54lbsa6tDkUwnu4W7Yt1aAIFLnspgh0kPGToDukeI= cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= cloud.google.com/go/longrunning v0.5.5 h1:GOE6pZFdSrTb4KAiKnXsJBtlE6mEyaW44oKyMILWnOg= @@ -127,8 +127,8 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9 cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= -cloud.google.com/go/storage v1.36.0 h1:P0mOkAcaJxhCTvAkMhxMfrTKiNcub4YmmPBtlhAyTr8= -cloud.google.com/go/storage v1.36.0/go.mod h1:M6M/3V/D3KpzMTJyPOR/HU6n2Si5QdaXYEsng2xgOs8= +cloud.google.com/go/storage v1.40.0 h1:VEpDQV5CJxFmJ6ueWNsKxcr1QAYOXEgxDa+sBbJahPw= 
+cloud.google.com/go/storage v1.40.0/go.mod h1:Rrj7/hKlG87BLqDJYtwR0fbPld8uJPbQ2ucUMY7Ir0g= cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0= @@ -400,6 +400,8 @@ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kB github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= github.com/bitly/go-hostpool v0.1.0 h1:XKmsF6k5el6xHG3WPJ8U0Ku/ye7njX7W81Ng7O2ioR0= github.com/bitly/go-hostpool v0.1.0/go.mod h1:4gOCgp6+NZnVqlKyZ/iBZFTAJKembaVENUpMkpg42fw= +github.com/bluele/gcache v0.0.2 h1:WcbfdXICg7G/DGBh1PFfcirkWOQV+v077yF1pSy3DGw= +github.com/bluele/gcache v0.0.2/go.mod h1:m15KV+ECjptwSPxKhOhQoAFQVtUFjTVkc3H8o0t/fp0= github.com/bmatcuk/doublestar v1.3.4 h1:gPypJ5xD31uhX6Tf54sDPUOBXTqKH4c9aPY66CyQrS0= github.com/bmatcuk/doublestar v1.3.4/go.mod h1:wiQtGV+rzVYxB7WIlirSN++5HPtPlXEo9MEoZQC/PmE= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= @@ -638,6 +640,8 @@ github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nos github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/fsouza/fake-gcs-server v1.7.0 h1:Un0BXUXrRWYSmYyC1Rqm2e2WJfTPyDy/HGMz31emTi8= github.com/fsouza/fake-gcs-server v1.7.0/go.mod h1:5XIRs4YvwNbNoz+1JF8j6KLAyDh7RHGAyAK3EP2EsNk= +github.com/fullstorydev/emulators/storage v0.0.0-20240401123056-edc69752f474 h1:TufioMBjkJ6/Oqmlye/ReuxHFS35HyLmypj/BNy/8GY= +github.com/fullstorydev/emulators/storage v0.0.0-20240401123056-edc69752f474/go.mod h1:PQwxF4UU8wuL+srGxr3BOhIW5zXqgucwVlO/nPZLsxw= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I= @@ -842,6 +846,8 @@ github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY9 github.com/goburrow/modbus v0.1.0/go.mod h1:Kx552D5rLIS8E7TyUwQ/UdHEqvX5T8tyiGBTlzMcZBg= github.com/goburrow/serial v0.1.0/go.mod h1:sAiqG0nRVswsm1C97xsttiYCzSLBmUZ/VSlVLZJ8haA= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= +github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/flock v0.7.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= @@ -999,8 +1005,8 @@ github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/Oth github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= -github.com/googleapis/gax-go/v2 v2.12.2 h1:mhN09QQW1jEWeMF74zGR81R30z4VJzjZsfkUhuHF+DA= -github.com/googleapis/gax-go/v2 v2.12.2/go.mod h1:61M8vcyyXR2kqKFxKrfA22jaA8JGF7Dc8App1U3H6jc= 
+github.com/googleapis/gax-go/v2 v2.12.3 h1:5/zPPDvw8Q1SuXjrqrZslrqT7dL/uJT2CQii/cLCKqA= +github.com/googleapis/gax-go/v2 v2.12.3/go.mod h1:AKloxT6GtNbaLm8QTNSidHUVsHYcBHwWRvkNFJUQcS4= github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= @@ -1288,12 +1294,12 @@ github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0 github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.17.3/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= -github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg= -github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= +github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg= -github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM= +github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE= github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/kolo/xmlrpc v0.0.0-20190717152603-07c4ee3fd181/go.mod h1:o03bZfuBwAXHetKXuInt4S7omeXUu62/A845kiycsSQ= @@ -1397,10 +1403,8 @@ github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPk github.com/mikioh/ipaddr v0.0.0-20190404000644-d465c8ab6721/go.mod h1:Ickgr2WtCLZ2MDGd4Gr0geeCH5HybhRJbonOgQpvSxc= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= -github.com/minio/minio-go/v7 v7.0.61 h1:87c+x8J3jxQ5VUGimV9oHdpjsAvy3fhneEBKuoKEVUI= -github.com/minio/minio-go/v7 v7.0.61/go.mod h1:BTu8FcrEw+HidY0zd/0eny43QnVNkXRPXrLXFuQBHXg= -github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= -github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= +github.com/minio/minio-go/v7 v7.0.72 h1:ZSbxs2BfJensLyHdVOgHv+pfmvxYraaUy07ER04dWnA= +github.com/minio/minio-go/v7 v7.0.72/go.mod h1:4yBA8v80xGA30cfM3fz0DKYMXunWl/AV/6tWEs9ryzo= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ= @@ -1771,8 +1775,8 @@ github.com/tedsuo/ifrit v0.0.0-20191009134036-9a97d0632f00/go.mod h1:eyZnKCc955u github.com/tencentcloud/tencentcloud-sdk-go v1.0.162/go.mod h1:asUz5BPXxgoPGaRgZaVm1iGcUAuHyYUo1nXqKa83cvI= github.com/tencentyun/cos-go-sdk-v5 v0.7.40 
h1:W6vDGKCHe4wBACI1d2UgE6+50sJFhRWU4O8IB2ozzxM= github.com/tencentyun/cos-go-sdk-v5 v0.7.40/go.mod h1:4dCEtLHGh8QPxHEkgq+nFaky7yZxQuYwgSJM87icDaw= -github.com/thanos-io/objstore v0.0.0-20240309075357-e8336a5fd5f3 h1:Q0BjHI7FMe5KkKVXBFYto5VNASxiA/+AEhHup/IT7N0= -github.com/thanos-io/objstore v0.0.0-20240309075357-e8336a5fd5f3/go.mod h1:ptMYNPgbyAR7a2Ab2t7zHA2/0be2ePyawVR7lp7fZtg= +github.com/thanos-io/objstore v0.0.0-20240722162417-19b0c0f0ffd8 h1:QAgAQPtOj3OTlNKrm7G/xPeuDa8xz7brfNHv3WTUq6I= +github.com/thanos-io/objstore v0.0.0-20240722162417-19b0c0f0ffd8/go.mod h1:3ukSkG4rIRUGkKM4oIz+BSuUx2e3RlQVVv3Cc3W+Tv4= github.com/tidwall/gjson v1.6.0/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls= github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= @@ -2468,8 +2472,8 @@ google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOI google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= -google.golang.org/api v0.168.0 h1:MBRe+Ki4mMN93jhDDbpuRLjRddooArz4FeSObvUMmjY= -google.golang.org/api v0.168.0/go.mod h1:gpNOiMA2tZ4mf5R9Iwf4rK/Dcz0fbdIgWYWVoxmsyLg= +google.golang.org/api v0.172.0 h1:/1OcMZGPmW1rX2LCu2CmGUD1KXK1+pfzxotxyRUCCdk= +google.golang.org/api v0.172.0/go.mod h1:+fJZq6QXWfa9pXhnIzsjx4yI22d4aI9ZpLb58gvXjis= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -2584,12 +2588,12 @@ google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+S google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= google.golang.org/genproto v0.0.0-20220921223823-23cae91e6737/go.mod h1:2r/26NEF3bFmT3eC3aZreahSal0C3Shl8Gi6vyDYqOQ= -google.golang.org/genproto v0.0.0-20240205150955-31a09d347014 h1:g/4bk7P6TPMkAUbUhquq98xey1slwvuVJPosdBqYJlU= -google.golang.org/genproto v0.0.0-20240205150955-31a09d347014/go.mod h1:xEgQu1e4stdSSsxPDK8Azkrk/ECl5HvdPf6nbZrTS5M= -google.golang.org/genproto/googleapis/api v0.0.0-20240304212257-790db918fca8 h1:8eadJkXbwDEMNwcB5O0s5Y5eCfyuCLdvaiOIaGTrWmQ= -google.golang.org/genproto/googleapis/api v0.0.0-20240304212257-790db918fca8/go.mod h1:O1cOfN1Cy6QEYr7VxtjOyP5AdAuR0aJ/MYZaaof623Y= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240304161311-37d4d3c04a78 h1:Xs9lu+tLXxLIfuci70nG4cpwaRC+mRQPUL7LoIeDJC4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240304161311-37d4d3c04a78/go.mod h1:UCOku4NytXMJuLQE5VuqA5lX3PcHCBo8pxNyvkf4xBs= +google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 h1:9+tzLLstTlPTRyJTh+ah5wIMsBW5c4tQwGTN3thOW9Y= +google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:mqHbVIp48Muh7Ywss/AD6I5kNVKZMmAa/QEW58Gxp2s= +google.golang.org/genproto/googleapis/api v0.0.0-20240314234333-6e1732d8331c h1:kaI7oewGK5YnVwj+Y+EJBO/YN1ht8iTL9XkFHtVZLsc= +google.golang.org/genproto/googleapis/api v0.0.0-20240314234333-6e1732d8331c/go.mod h1:VQW3tUculP/D4B+xVCo+VgSq8As6wA9ZjHl//pmk+6s= 
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 h1:NnYq6UN9ReLM9/Y01KWNOWyI5xQ9kbIms5GGJVwS/Yc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= diff --git a/vendor/cloud.google.com/go/.release-please-manifest-individual.json b/vendor/cloud.google.com/go/.release-please-manifest-individual.json index bcbd757af9619..889d612569e38 100644 --- a/vendor/cloud.google.com/go/.release-please-manifest-individual.json +++ b/vendor/cloud.google.com/go/.release-please-manifest-individual.json @@ -1,16 +1,16 @@ { "auth": "0.1.0", "auth/oauth2adapt": "0.1.0", - "bigquery": "1.57.1", + "bigquery": "1.59.1", "bigtable": "1.21.0", "datastore": "1.15.0", "errorreporting": "0.3.0", "firestore": "1.14.0", "logging": "1.9.0", "profiler": "0.4.0", - "pubsub": "1.33.0", + "pubsub": "1.36.1", "pubsublite": "1.8.1", - "spanner": "1.55.0", - "storage": "1.36.0", - "vertexai": "0.6.0" + "spanner": "1.57.0", + "storage": "1.38.0", + "vertexai": "0.7.1" } diff --git a/vendor/cloud.google.com/go/.release-please-manifest-submodules.json b/vendor/cloud.google.com/go/.release-please-manifest-submodules.json index b761f5a7fcfee..44d4468a4d92f 100644 --- a/vendor/cloud.google.com/go/.release-please-manifest-submodules.json +++ b/vendor/cloud.google.com/go/.release-please-manifest-submodules.json @@ -1,137 +1,142 @@ { - "accessapproval": "1.7.4", - "accesscontextmanager": "1.8.4", - "advisorynotifications": "1.3.0", - "ai": "0.3.0", - "aiplatform": "1.58.0", - "alloydb": "1.8.0", - "analytics": "0.22.0", - "apigateway": "1.6.4", - "apigeeconnect": "1.6.4", - "apigeeregistry": "0.8.2", - "apikeys": "1.1.4", - "appengine": "1.8.4", - "apps": "0.2.0", - "area120": "0.8.4", - "artifactregistry": "1.14.6", - "asset": "1.17.0", - "assuredworkloads": "1.11.4", - "automl": "1.13.4", - "baremetalsolution": "1.2.3", - "batch": "1.7.0", - "beyondcorp": "1.0.3", - "billing": "1.18.0", - "binaryauthorization": "1.8.0", - "certificatemanager": "1.7.4", - "channel": "1.17.4", - "cloudbuild": "1.15.0", - "clouddms": "1.7.3", - "cloudprofiler": "0.2.0", - "cloudquotas": "0.1.0", - "cloudtasks": "1.12.4", - "commerce": "0.1.3", - "compute": "1.23.3", + "accessapproval": "1.7.5", + "accesscontextmanager": "1.8.5", + "advisorynotifications": "1.3.1", + "ai": "0.3.2", + "aiplatform": "1.61.0", + "alloydb": "1.8.1", + "analytics": "0.23.0", + "apigateway": "1.6.5", + "apigeeconnect": "1.6.5", + "apigeeregistry": "0.8.3", + "apikeys": "1.1.5", + "appengine": "1.8.5", + "apps": "0.4.0", + "area120": "0.8.5", + "artifactregistry": "1.14.7", + "asset": "1.17.2", + "assuredworkloads": "1.11.5", + "automl": "1.13.5", + "baremetalsolution": "1.2.4", + "batch": "1.8.1", + "beyondcorp": "1.0.4", + "billing": "1.18.3", + "binaryauthorization": "1.8.1", + "certificatemanager": "1.7.5", + "channel": "1.17.5", + "cloudbuild": "1.15.1", + "clouddms": "1.7.4", + "cloudprofiler": "0.2.1", + "cloudquotas": "0.1.1", + "cloudtasks": "1.12.6", + "commerce": "0.1.4", + "compute": "1.24.0", "compute/metadata": "0.2.3", - "confidentialcomputing": "1.4.0", - "config": "0.1.4", - "contactcenterinsights": "1.12.1", - "container": "1.29.0", - "containeranalysis": "0.11.3", - "datacatalog": "1.19.1", - 
"dataflow": "0.9.4", - "dataform": "0.9.1", - "datafusion": "1.7.4", - "datalabeling": "0.8.4", - "dataplex": "1.14.0", - "dataproc": "2.3.0", - "dataqna": "0.8.4", - "datastream": "1.10.3", - "deploy": "1.17.0", - "dialogflow": "1.48.0", - "discoveryengine": "1.4.0", - "dlp": "1.11.1", - "documentai": "1.23.7", - "domains": "0.9.4", - "edgecontainer": "1.1.4", - "edgenetwork": "0.1.0", - "essentialcontacts": "1.6.5", - "eventarc": "1.13.3", - "filestore": "1.8.0", - "functions": "1.15.4", - "gkebackup": "1.3.4", - "gkeconnect": "0.8.4", - "gkehub": "0.14.4", - "gkemulticloud": "1.1.0", + "confidentialcomputing": "1.4.1", + "config": "0.2.1", + "contactcenterinsights": "1.13.0", + "container": "1.31.0", + "containeranalysis": "0.11.4", + "datacatalog": "1.19.3", + "dataflow": "0.9.5", + "dataform": "0.9.2", + "datafusion": "1.7.5", + "datalabeling": "0.8.5", + "dataplex": "1.14.2", + "dataproc": "2.4.0", + "dataqna": "0.8.5", + "datastream": "1.10.4", + "deploy": "1.17.1", + "dialogflow": "1.49.0", + "discoveryengine": "1.5.0", + "dlp": "1.11.2", + "documentai": "1.26.0", + "domains": "0.9.5", + "edgecontainer": "1.1.5", + "edgenetwork": "0.2.1", + "essentialcontacts": "1.6.6", + "eventarc": "1.13.4", + "filestore": "1.8.1", + "functions": "1.16.0", + "gkebackup": "1.3.5", + "gkeconnect": "0.8.5", + "gkehub": "0.14.5", + "gkemulticloud": "1.1.1", "grafeas": "0.3.4", - "gsuiteaddons": "1.6.4", - "iam": "1.1.5", - "iap": "1.9.3", - "ids": "1.4.4", - "iot": "1.7.4", - "kms": "1.15.5", - "language": "1.12.2", - "lifesciences": "0.9.4", - "longrunning": "0.5.4", - "managedidentities": "1.6.4", - "maps": "1.6.2", - "mediatranslation": "0.8.4", - "memcache": "1.10.4", - "metastore": "1.13.3", - "migrationcenter": "0.2.3", - "monitoring": "1.17.0", - "netapp": "0.2.3", - "networkconnectivity": "1.14.3", - "networkmanagement": "1.9.3", - "networksecurity": "0.9.4", - "notebooks": "1.11.2", - "optimization": "1.6.2", - "orchestration": "1.8.4", - "orgpolicy": "1.12.0", - "osconfig": "1.12.4", - "oslogin": "1.12.2", - "phishingprotection": "0.8.4", - "policysimulator": "0.2.2", - "policytroubleshooter": "1.10.2", - "privatecatalog": "0.9.4", - "rapidmigrationassessment": "1.0.4", - "recaptchaenterprise": "2.9.0", - "recommendationengine": "0.8.4", - "recommender": "1.12.0", - "redis": "1.14.1", - "resourcemanager": "1.9.4", - "resourcesettings": "1.6.4", - "retail": "1.14.4", - "run": "1.3.3", - "scheduler": "1.10.5", - "secretmanager": "1.11.4", - "securesourcemanager": "0.1.2", - "security": "1.15.4", - "securitycenter": "1.24.3", - "securitycentermanagement": "0.1.1", - "servicecontrol": "1.12.4", - "servicedirectory": "1.11.3", - "servicemanagement": "1.9.5", - "serviceusage": "1.8.3", - "shell": "1.7.4", - "shopping": "0.3.0", - "speech": "1.21.0", - "storageinsights": "1.0.4", - "storagetransfer": "1.10.3", - "support": "1.0.3", - "talent": "1.6.5", - "telcoautomation": "0.1.1", - "texttospeech": "1.7.4", - "tpu": "1.6.4", - "trace": "1.10.4", - "translate": "1.10.0", - "video": "1.20.3", - "videointelligence": "1.11.4", - "vision": "2.7.5", - "vmmigration": "1.7.4", - "vmwareengine": "1.0.3", - "vpcaccess": "1.7.4", - "webrisk": "1.9.4", - "websecurityscanner": "1.6.4", - "workflows": "1.12.3", - "workstations": "0.5.3" + "gsuiteaddons": "1.6.5", + "iam": "1.1.6", + "iap": "1.9.4", + "ids": "1.4.5", + "iot": "1.7.5", + "kms": "1.15.7", + "language": "1.12.3", + "lifesciences": "0.9.5", + "longrunning": "0.5.5", + "managedidentities": "1.6.5", + "maps": "1.7.0", + "mediatranslation": "0.8.5", + 
"memcache": "1.10.5", + "metastore": "1.13.4", + "migrationcenter": "0.2.4", + "monitoring": "1.18.0", + "netapp": "0.2.4", + "networkconnectivity": "1.14.4", + "networkmanagement": "1.9.4", + "networksecurity": "0.9.5", + "notebooks": "1.11.3", + "optimization": "1.6.3", + "orchestration": "1.9.0", + "orgpolicy": "1.12.1", + "osconfig": "1.12.5", + "oslogin": "1.13.1", + "parallelstore": "0.1.0", + "phishingprotection": "0.8.5", + "policysimulator": "0.2.3", + "policytroubleshooter": "1.10.3", + "privatecatalog": "0.9.5", + "rapidmigrationassessment": "1.0.5", + "recaptchaenterprise": "2.10.0", + "recommendationengine": "0.8.5", + "recommender": "1.12.1", + "redis": "1.14.2", + "resourcemanager": "1.9.5", + "resourcesettings": "1.6.5", + "retail": "1.16.0", + "run": "1.3.4", + "scheduler": "1.10.6", + "secretmanager": "1.11.5", + "securesourcemanager": "0.1.3", + "security": "1.15.5", + "securitycenter": "1.25.0", + "securitycentermanagement": "0.1.4", + "securityposture": "0.1.1", + "servicecontrol": "1.13.0", + "servicedirectory": "1.11.4", + "servicehealth": "0.1.2", + "servicemanagement": "1.9.6", + "serviceusage": "1.8.4", + "shell": "1.7.5", + "shopping": "0.3.1", + "spanner/test/opentelemetry/test": "0.0.0", + "speech": "1.21.1", + "storageinsights": "1.0.5", + "storagetransfer": "1.10.4", + "support": "1.0.4", + "talent": "1.6.6", + "telcoautomation": "0.2.0", + "texttospeech": "1.7.5", + "tpu": "1.6.5", + "trace": "1.10.5", + "translate": "1.10.1", + "video": "1.20.4", + "videointelligence": "1.11.5", + "vision": "2.8.0", + "visionai": "0.1.0", + "vmmigration": "1.7.5", + "vmwareengine": "1.1.1", + "vpcaccess": "1.7.5", + "webrisk": "1.9.5", + "websecurityscanner": "1.6.5", + "workflows": "1.12.4", + "workstations": "0.5.4" } diff --git a/vendor/cloud.google.com/go/.release-please-manifest.json b/vendor/cloud.google.com/go/.release-please-manifest.json index 333e70a10a79a..1e4d1cb825524 100644 --- a/vendor/cloud.google.com/go/.release-please-manifest.json +++ b/vendor/cloud.google.com/go/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "0.112.0" + ".": "0.112.1" } diff --git a/vendor/cloud.google.com/go/CHANGES.md b/vendor/cloud.google.com/go/CHANGES.md index 2a2b4cbb43fcb..0a61afeb8c29c 100644 --- a/vendor/cloud.google.com/go/CHANGES.md +++ b/vendor/cloud.google.com/go/CHANGES.md @@ -1,5 +1,17 @@ # Changes +## [0.112.1](https://github.com/googleapis/google-cloud-go/compare/v0.112.0...v0.112.1) (2024-02-26) + + +### Bug Fixes + +* **internal/postprocessor:** Handle googleapis link in commit body ([#9251](https://github.com/googleapis/google-cloud-go/issues/9251)) ([1dd3515](https://github.com/googleapis/google-cloud-go/commit/1dd35157bff871a2b3e5b0e3cac33502737fd631)) + + +### Documentation + +* **main:** Add OpenTelemetry-Go compatibility warning to debug.md ([#9268](https://github.com/googleapis/google-cloud-go/issues/9268)) ([18f9bb9](https://github.com/googleapis/google-cloud-go/commit/18f9bb94fbc239255a873b29462fc7c2eac3c0aa)), refs [#9267](https://github.com/googleapis/google-cloud-go/issues/9267) + ## [0.112.0](https://github.com/googleapis/google-cloud-go/compare/v0.111.0...v0.112.0) (2024-01-11) diff --git a/vendor/cloud.google.com/go/CONTRIBUTING.md b/vendor/cloud.google.com/go/CONTRIBUTING.md index d07f81f1797f1..a710e169b4955 100644 --- a/vendor/cloud.google.com/go/CONTRIBUTING.md +++ b/vendor/cloud.google.com/go/CONTRIBUTING.md @@ -47,6 +47,22 @@ Commits will be squashed when they're merged. 
+## Policy on new dependencies + +While the Go ecosystem is rich with useful modules, in this project we try to +minimize the number of direct dependencies we have on modules that are not +Google-owned. + +Adding new third party dependencies can have the following effects: +* broadens the vulnerability surface +* increases so called "vanity" import routing infrastructure failure points +* increases complexity of our own [`third_party`][] imports + +So if you are contributing, please either contribute the full implementation +directly, or find a Google-owned project that provides the functionality. Of +course, there may be exceptions to this rule, but those should be well defined +and agreed upon by the maintainers ahead of time. + ## Testing We test code against two versions of Go, the minimum and maximum versions @@ -343,3 +359,4 @@ available at [https://contributor-covenant.org/version/1/2/0/](https://contribut [gcloudcli]: https://developers.google.com/cloud/sdk/gcloud/ [indvcla]: https://developers.google.com/open-source/cla/individual [corpcla]: https://developers.google.com/open-source/cla/corporate +[`third_party`]: https://opensource.google/documentation/reference/thirdparty diff --git a/vendor/cloud.google.com/go/compute/internal/version.go b/vendor/cloud.google.com/go/compute/internal/version.go index 27a1970b9d825..291a237fe1cda 100644 --- a/vendor/cloud.google.com/go/compute/internal/version.go +++ b/vendor/cloud.google.com/go/compute/internal/version.go @@ -15,4 +15,4 @@ package internal // Version is the current tagged release of the library. -const Version = "1.23.4" +const Version = "1.24.0" diff --git a/vendor/cloud.google.com/go/debug.md b/vendor/cloud.google.com/go/debug.md index 0c9826544c929..beec915552515 100644 --- a/vendor/cloud.google.com/go/debug.md +++ b/vendor/cloud.google.com/go/debug.md @@ -1,6 +1,6 @@ # Logging, Debugging and Telemetry -**Warning:The OpenCensus project is obsolete and was archived on July 31st, +**Warning: The OpenCensus project is obsolete and was archived on July 31st, 2023.** This means that any security vulnerabilities that are found will not be patched. We recommend that you begin migrating to OpenCensus tracing to OpenTelemetry, the successor project. See [OpenCensus](#opencensus) below for @@ -16,7 +16,7 @@ into a system's health. ## Logging and debugging -While working with the Go Client libraries you may run into some situations +While working with the Go Client Libraries you may run into some situations where you need a deeper level of understanding about what is going on in order to solve your problem. Here are some tips and tricks that you can use in these cases. *Note* that many of the tips in this section will have a performance @@ -179,7 +179,7 @@ func main() { ## Telemetry -**Warning:The OpenCensus project is obsolete and was archived on July 31st, +**Warning: The OpenCensus project is obsolete and was archived on July 31st, 2023.** This means that any security vulnerabilities that are found will not be patched. We recommend that you begin migrating to OpenCensus tracing to OpenTelemetry, the successor project. See [OpenCensus](#opencensus) below for @@ -211,7 +211,7 @@ OpenTelemetry support via an environment variable, as described below. #### OpenCensus -**Warning:The OpenCensus project is obsolete and was archived on July 31st, +**Warning: The OpenCensus project is obsolete and was archived on July 31st, 2023.** This means that any security vulnerabilities that are found will not be patched. 
We recommend that you begin migrating to OpenCensus tracing to OpenTelemetry, the successor project. @@ -252,6 +252,15 @@ Please refer to the following resources: #### OpenTelemetry +**Warning: OpenTelemetry-Go ensures +[compatibility](https://github.com/open-telemetry/opentelemetry-go/tree/main?tab=readme-ov-file#compatibility) +with ONLY the current supported versions of the [Go +language](https://go.dev/doc/devel/release#policy). This support may be narrower +than the support that has been offered historically by the Go Client Libraries. +Ensure that your Go runtime version is supported by the OpenTelemetry-Go +[compatibility](https://github.com/open-telemetry/opentelemetry-go/tree/main?tab=readme-ov-file#compatibility) +policy before enabling OpenTelemetry instrumentation.** + To opt-in to experimental OpenTelemetry tracing currently available in the clients listed above, set the environment variable `GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING` to the case-insensitive diff --git a/vendor/cloud.google.com/go/doc.go b/vendor/cloud.google.com/go/doc.go index a90d072f0f056..66fcc4b6587a5 100644 --- a/vendor/cloud.google.com/go/doc.go +++ b/vendor/cloud.google.com/go/doc.go @@ -250,7 +250,7 @@ situations, including: [testing against fake servers]: https://github.com/googleapis/google-cloud-go/blob/main/testing.md#testing-grpc-services-using-fakes [Vertex AI - Locations]: https://cloud.google.com/vertex-ai/docs/general/locations [Google Application Default Credentials]: https://cloud.google.com/docs/authentication/external/set-up-adc -[Logging, Debugging and Telemetry Guide]: https://github.com/googleapis/google-cloud-go/blob/main/debug.md [Testing Guide]: https://github.com/googleapis/google-cloud-go/blob/main/testing.md +[Debugging Guide]: https://github.com/googleapis/google-cloud-go/blob/main/debug.md */ package cloud // import "cloud.google.com/go" diff --git a/vendor/cloud.google.com/go/iam/CHANGES.md b/vendor/cloud.google.com/go/iam/CHANGES.md index 43a1793848697..11cf3ce4f64a5 100644 --- a/vendor/cloud.google.com/go/iam/CHANGES.md +++ b/vendor/cloud.google.com/go/iam/CHANGES.md @@ -1,6 +1,13 @@ # Changes +## [1.1.7](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.6...iam/v1.1.7) (2024-03-14) + + +### Bug Fixes + +* **iam:** Update protobuf dep to v1.33.0 ([30b038d](https://github.com/googleapis/google-cloud-go/commit/30b038d8cac0b8cd5dd4761c87f3f298760dd33a)) + ## [1.1.6](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.5...iam/v1.1.6) (2024-01-30) diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go index b5243e61291d3..6916cfa45a8d2 100644 --- a/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go +++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.32.0 -// protoc v4.23.2 +// protoc v4.25.2 // source: google/iam/v1/iam_policy.proto package iampb diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go index 3f854fe496ebe..30865602b485f 100644 --- a/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go +++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.32.0 -// protoc v4.23.2 +// protoc v4.25.2 // source: google/iam/v1/options.proto package iampb diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go index dfc60661a3050..84afa92b519b9 100644 --- a/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go +++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.32.0 -// protoc v4.23.2 +// protoc v4.25.2 // source: google/iam/v1/policy.proto package iampb diff --git a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json index ae8a1fc146765..c9a57ccddbc54 100644 --- a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json +++ b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json @@ -199,6 +199,26 @@ "release_level": "stable", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/apps/events/subscriptions/apiv1": { + "api_shortname": "workspaceevents", + "distribution_name": "cloud.google.com/go/apps/events/subscriptions/apiv1", + "description": "Google Workspace Events API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apps/latest/events/subscriptions/apiv1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/apps/meet/apiv2": { + "api_shortname": "meet", + "distribution_name": "cloud.google.com/go/apps/meet/apiv2", + "description": "Google Meet API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apps/latest/meet/apiv2", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/apps/meet/apiv2beta": { "api_shortname": "meet", "distribution_name": "cloud.google.com/go/apps/meet/apiv2beta", @@ -1779,6 +1799,16 @@ "release_level": "preview", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/parallelstore/apiv1beta": { + "api_shortname": "parallelstore", + "distribution_name": "cloud.google.com/go/parallelstore/apiv1beta", + "description": "Parallelstore API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/parallelstore/latest/apiv1beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/phishingprotection/apiv1beta1": { "api_shortname": "phishingprotection", "distribution_name": "cloud.google.com/go/phishingprotection/apiv1beta1", @@ -2159,6 +2189,16 @@ "release_level": "preview", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/securityposture/apiv1": { + "api_shortname": "securityposture", + "distribution_name": "cloud.google.com/go/securityposture/apiv1", + "description": "Security Posture API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/securityposture/latest/apiv1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/servicecontrol/apiv1": { "api_shortname": "servicecontrol", "distribution_name": "cloud.google.com/go/servicecontrol/apiv1", @@ -2189,6 +2229,16 @@ "release_level": "preview", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/servicehealth/apiv1": { + "api_shortname": 
"servicehealth", + "distribution_name": "cloud.google.com/go/servicehealth/apiv1", + "description": "Service Health API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/servicehealth/latest/apiv1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/servicemanagement/apiv1": { "api_shortname": "servicemanagement", "distribution_name": "cloud.google.com/go/servicemanagement/apiv1", @@ -2529,6 +2579,16 @@ "release_level": "preview", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/visionai/apiv1": { + "api_shortname": "visionai", + "distribution_name": "cloud.google.com/go/visionai/apiv1", + "description": "Vision AI API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/visionai/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/vmmigration/apiv1": { "api_shortname": "vmmigration", "distribution_name": "cloud.google.com/go/vmmigration/apiv1", diff --git a/vendor/cloud.google.com/go/internal/trace/trace.go b/vendor/cloud.google.com/go/internal/trace/trace.go index eabed000f3091..97738b2cbee2a 100644 --- a/vendor/cloud.google.com/go/internal/trace/trace.go +++ b/vendor/cloud.google.com/go/internal/trace/trace.go @@ -20,6 +20,7 @@ import ( "fmt" "os" "strings" + "sync" "go.opencensus.io/trace" "go.opentelemetry.io/otel" @@ -50,17 +51,23 @@ const ( ) var ( - // OpenTelemetryTracingEnabled is true if the environment variable + // openTelemetryTracingEnabledMu guards access to openTelemetryTracingEnabled field + openTelemetryTracingEnabledMu = sync.RWMutex{} + // openTelemetryTracingEnabled is true if the environment variable // GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING is set to the // case-insensitive value "opentelemetry". - // - // Do not access directly. Use instead IsOpenTelemetryTracingEnabled or - // IsOpenCensusTracingEnabled. Intended for use only in unit tests. Restore - // original value after each test. - OpenTelemetryTracingEnabled bool = strings.EqualFold(strings.TrimSpace( + openTelemetryTracingEnabled bool = strings.EqualFold(strings.TrimSpace( os.Getenv(TelemetryPlatformTracingVar)), TelemetryPlatformTracingOpenTelemetry) ) +// SetOpenTelemetryTracingEnabledField programmatically sets the value provided by GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING for the purpose of unit testing. +// Do not invoke it directly. Intended for use only in unit tests. Restore original value after each test. +func SetOpenTelemetryTracingEnabledField(enabled bool) { + openTelemetryTracingEnabledMu.Lock() + defer openTelemetryTracingEnabledMu.Unlock() + openTelemetryTracingEnabled = enabled +} + // IsOpenCensusTracingEnabled returns true if the environment variable // GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING is NOT set to the // case-insensitive value "opentelemetry". @@ -72,7 +79,9 @@ func IsOpenCensusTracingEnabled() bool { // GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING is set to the // case-insensitive value "opentelemetry". func IsOpenTelemetryTracingEnabled() bool { - return OpenTelemetryTracingEnabled + openTelemetryTracingEnabledMu.RLock() + defer openTelemetryTracingEnabledMu.RUnlock() + return openTelemetryTracingEnabled } // StartSpan adds a span to the trace with the given name. 
If IsOpenCensusTracingEnabled diff --git a/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json b/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json index e6a2b0ed66cca..cd2cd4beaa937 100644 --- a/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json +++ b/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json @@ -273,6 +273,9 @@ "oslogin": { "component": "oslogin" }, + "parallelstore": { + "component": "parallelstore" + }, "phishingprotection": { "component": "phishingprotection" }, @@ -330,12 +333,18 @@ "securitycentermanagement": { "component": "securitycentermanagement" }, + "securityposture": { + "component": "securityposture" + }, "servicecontrol": { "component": "servicecontrol" }, "servicedirectory": { "component": "servicedirectory" }, + "servicehealth": { + "component": "servicehealth" + }, "servicemanagement": { "component": "servicemanagement" }, @@ -348,6 +357,9 @@ "shopping": { "component": "shopping" }, + "spanner/test/opentelemetry/test": { + "component": "spanner/test/opentelemetry/test" + }, "speech": { "component": "speech" }, @@ -387,6 +399,9 @@ "vision": { "component": "vision" }, + "visionai": { + "component": "visionai" + }, "vmmigration": { "component": "vmmigration" }, diff --git a/vendor/cloud.google.com/go/storage/CHANGES.md b/vendor/cloud.google.com/go/storage/CHANGES.md index 8b3fa6fc483bf..625ad4fbe7ec3 100644 --- a/vendor/cloud.google.com/go/storage/CHANGES.md +++ b/vendor/cloud.google.com/go/storage/CHANGES.md @@ -1,6 +1,71 @@ # Changes +## [1.40.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.39.1...storage/v1.40.0) (2024-03-29) + + +### Features + +* **storage:** Implement io.WriterTo in Reader ([#9659](https://github.com/googleapis/google-cloud-go/issues/9659)) ([8264a96](https://github.com/googleapis/google-cloud-go/commit/8264a962d1c21d52e8fca50af064c5535c3708d3)) +* **storage:** New storage control client ([#9631](https://github.com/googleapis/google-cloud-go/issues/9631)) ([1f4d279](https://github.com/googleapis/google-cloud-go/commit/1f4d27957743878976d6b4549cc02a5bb894d330)) + + +### Bug Fixes + +* **storage:** Retry errors from last recv on uploads ([#9616](https://github.com/googleapis/google-cloud-go/issues/9616)) ([b6574aa](https://github.com/googleapis/google-cloud-go/commit/b6574aa42ebad0532c2749b6ece879b932f95cb9)) +* **storage:** Update protobuf dep to v1.33.0 ([30b038d](https://github.com/googleapis/google-cloud-go/commit/30b038d8cac0b8cd5dd4761c87f3f298760dd33a)) + + +### Performance Improvements + +* **storage:** Remove protobuf's copy of data on unmarshalling ([#9526](https://github.com/googleapis/google-cloud-go/issues/9526)) ([81281c0](https://github.com/googleapis/google-cloud-go/commit/81281c04e503fd83301baf88cc352c77f5d476ca)) + +## [1.39.1](https://github.com/googleapis/google-cloud-go/compare/storage/v1.39.0...storage/v1.39.1) (2024-03-11) + + +### Bug Fixes + +* **storage:** Add object validation case and test ([#9521](https://github.com/googleapis/google-cloud-go/issues/9521)) ([386bef3](https://github.com/googleapis/google-cloud-go/commit/386bef319b4678beaa926ddfe4edef190f11b68d)) + +## [1.39.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.38.0...storage/v1.39.0) (2024-02-29) + + +### Features + +* **storage:** Make it possible to disable Content-Type sniffing ([#9431](https://github.com/googleapis/google-cloud-go/issues/9431)) 
([0676670](https://github.com/googleapis/google-cloud-go/commit/067667058c06689b64401be11858d84441584039)) + +## [1.38.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.37.0...storage/v1.38.0) (2024-02-12) + + +### Features + +* **storage:** Support auto-detection of access ID for external_account creds ([#9208](https://github.com/googleapis/google-cloud-go/issues/9208)) ([b958d44](https://github.com/googleapis/google-cloud-go/commit/b958d44589f2b6b226ea3bef23829ac75a0aa6a6)) +* **storage:** Support custom hostname for VirtualHostedStyle SignedURLs ([#9348](https://github.com/googleapis/google-cloud-go/issues/9348)) ([7eec40e](https://github.com/googleapis/google-cloud-go/commit/7eec40e4cf82c53e5bf02bd2c14e0b25043da6d0)) +* **storage:** Support universe domains ([#9344](https://github.com/googleapis/google-cloud-go/issues/9344)) ([29a7498](https://github.com/googleapis/google-cloud-go/commit/29a7498b8eb0d00fdb5acd7ee8ce0e5a2a8c11ce)) + + +### Bug Fixes + +* **storage:** Fix v4 url signing for hosts that specify ports ([#9347](https://github.com/googleapis/google-cloud-go/issues/9347)) ([f127b46](https://github.com/googleapis/google-cloud-go/commit/f127b4648f861c1ba44f41a280a62652620c04c2)) + + +### Documentation + +* **storage:** Indicate that gRPC is incompatible with universe domains ([#9386](https://github.com/googleapis/google-cloud-go/issues/9386)) ([e8bd85b](https://github.com/googleapis/google-cloud-go/commit/e8bd85bbce12d5f7ab87fa49d166a6a0d84bd12d)) + +## [1.37.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.36.0...storage/v1.37.0) (2024-01-24) + + +### Features + +* **storage:** Add maxAttempts RetryOption ([#9215](https://github.com/googleapis/google-cloud-go/issues/9215)) ([e348cc5](https://github.com/googleapis/google-cloud-go/commit/e348cc5340e127b530e8ee4664fd995e6f038b2c)) +* **storage:** Support IncludeFoldersAsPrefixes ([#9211](https://github.com/googleapis/google-cloud-go/issues/9211)) ([98c9d71](https://github.com/googleapis/google-cloud-go/commit/98c9d7157306de5134547a67c084c248484c9a51)) + + +### Bug Fixes + +* **storage:** Migrate deprecated proto dep ([#9232](https://github.com/googleapis/google-cloud-go/issues/9232)) ([ebbb610](https://github.com/googleapis/google-cloud-go/commit/ebbb610e0f58035fd01ad7893971382d8bbd092f)), refs [#9189](https://github.com/googleapis/google-cloud-go/issues/9189) + ## [1.36.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.35.1...storage/v1.36.0) (2023-12-14) diff --git a/vendor/cloud.google.com/go/storage/bucket.go b/vendor/cloud.google.com/go/storage/bucket.go index 1059d4e8b7dd5..0344ef9de3107 100644 --- a/vendor/cloud.google.com/go/storage/bucket.go +++ b/vendor/cloud.google.com/go/storage/bucket.go @@ -275,18 +275,24 @@ func (b *BucketHandle) detectDefaultGoogleAccessID() (string, error) { err := json.Unmarshal(b.c.creds.JSON, &sa) if err != nil { returnErr = err - } else if sa.CredType == "impersonated_service_account" { - start, end := strings.LastIndex(sa.SAImpersonationURL, "/"), strings.LastIndex(sa.SAImpersonationURL, ":") - - if end <= start { - returnErr = errors.New("error parsing impersonated service account credentials") - } else { - return sa.SAImpersonationURL[start+1 : end], nil - } - } else if sa.CredType == "service_account" && sa.ClientEmail != "" { - return sa.ClientEmail, nil } else { - returnErr = errors.New("unable to parse credentials; only service_account and impersonated_service_account credentials are supported") + switch sa.CredType { + case 
"impersonated_service_account", "external_account": + start, end := strings.LastIndex(sa.SAImpersonationURL, "/"), strings.LastIndex(sa.SAImpersonationURL, ":") + + if end <= start { + returnErr = errors.New("error parsing external or impersonated service account credentials") + } else { + return sa.SAImpersonationURL[start+1 : end], nil + } + case "service_account": + if sa.ClientEmail != "" { + return sa.ClientEmail, nil + } + returnErr = errors.New("empty service account client email") + default: + returnErr = errors.New("unable to parse credentials; only service_account, external_account and impersonated_service_account credentials are supported") + } } } @@ -302,7 +308,7 @@ func (b *BucketHandle) detectDefaultGoogleAccessID() (string, error) { } } - return "", fmt.Errorf("storage: unable to detect default GoogleAccessID: %w. Please provide the GoogleAccessID or use a supported means for autodetecting it (see https://pkg.go.dev/cloud.google.com/go/storage#hdr-Credential_requirements_for_[BucketHandle.SignedURL]_and_[BucketHandle.GenerateSignedPostPolicyV4])", returnErr) + return "", fmt.Errorf("storage: unable to detect default GoogleAccessID: %w. Please provide the GoogleAccessID or use a supported means for autodetecting it (see https://pkg.go.dev/cloud.google.com/go/storage#hdr-Credential_requirements_for_signing)", returnErr) } func (b *BucketHandle) defaultSignBytesFunc(email string) func([]byte) ([]byte, error) { diff --git a/vendor/cloud.google.com/go/storage/client.go b/vendor/cloud.google.com/go/storage/client.go index 4906b1d1f7048..70b2a280e3b3e 100644 --- a/vendor/cloud.google.com/go/storage/client.go +++ b/vendor/cloud.google.com/go/storage/client.go @@ -254,6 +254,9 @@ type openWriterParams struct { // attrs - see `Writer.ObjectAttrs`. // Required. attrs *ObjectAttrs + // forceEmptyContentType - Disables auto-detect of Content-Type + // Optional. + forceEmptyContentType bool // conds - see `Writer.o.conds`. // Optional. conds *Conditions diff --git a/vendor/cloud.google.com/go/storage/doc.go b/vendor/cloud.google.com/go/storage/doc.go index 22adb744fe4f8..b23cebcb83679 100644 --- a/vendor/cloud.google.com/go/storage/doc.go +++ b/vendor/cloud.google.com/go/storage/doc.go @@ -335,9 +335,10 @@ to add a [custom audit logging] header: This package includes support for the Cloud Storage gRPC API, which is currently in preview. This implementation uses gRPC rather than the current JSON & XML -APIs to make requests to Cloud Storage. If you would like to try the API, -please contact your GCP account rep for more information. The gRPC API is not -yet generally available, so it may be subject to breaking changes. +APIs to make requests to Cloud Storage. Kindly contact the Google Cloud Storage gRPC +team at gcs-grpc-contact@google.com with a list of GCS buckets you would like to +allowlist to access this API. The Go Storage gRPC library is not yet generally +available, so it may be subject to breaking changes. To create a client which will use gRPC, use the alternate constructor: @@ -349,7 +350,7 @@ To create a client which will use gRPC, use the alternate constructor: // Use client as usual. If the application is running within GCP, users may get better performance by -enabling DirectPath (enabling requests to skip some proxy steps). To enable, +enabling Google Direct Access (enabling requests to skip some proxy steps). 
To enable, set the environment variable `GOOGLE_CLOUD_ENABLE_DIRECT_PATH_XDS=true` and add the following side-effect imports to your application: diff --git a/vendor/cloud.google.com/go/storage/grpc_client.go b/vendor/cloud.google.com/go/storage/grpc_client.go index a51cf9c086b17..e337213f03f29 100644 --- a/vendor/cloud.google.com/go/storage/grpc_client.go +++ b/vendor/cloud.google.com/go/storage/grpc_client.go @@ -19,6 +19,7 @@ import ( "encoding/base64" "errors" "fmt" + "hash/crc32" "io" "net/url" "os" @@ -27,6 +28,7 @@ import ( "cloud.google.com/go/internal/trace" gapic "cloud.google.com/go/storage/internal/apiv2" "cloud.google.com/go/storage/internal/apiv2/storagepb" + "github.com/golang/protobuf/proto" "github.com/googleapis/gax-go/v2" "google.golang.org/api/googleapi" "google.golang.org/api/iterator" @@ -34,8 +36,10 @@ import ( "google.golang.org/api/option/internaloption" "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/encoding" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" + "google.golang.org/protobuf/encoding/protowire" fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" ) @@ -420,6 +424,11 @@ func (c *grpcStorageClient) ListObjects(ctx context.Context, bucket string, q *Q ctx = setUserProjectMetadata(ctx, s.userProject) } fetch := func(pageSize int, pageToken string) (token string, err error) { + // IncludeFoldersAsPrefixes is not supported for gRPC + // TODO: remove this when support is added in the proto. + if it.query.IncludeFoldersAsPrefixes { + return "", status.Errorf(codes.Unimplemented, "storage: IncludeFoldersAsPrefixes is not supported in gRPC") + } var objects []*storagepb.Object var gitr *gapic.ObjectIterator err = run(it.ctx, func(ctx context.Context) error { @@ -897,12 +906,50 @@ func (c *grpcStorageClient) RewriteObject(ctx context.Context, req *rewriteObjec return r, nil } +// bytesCodec is a grpc codec which permits receiving messages as either +// protobuf messages, or as raw []bytes. +type bytesCodec struct { + encoding.Codec +} + +func (bytesCodec) Marshal(v any) ([]byte, error) { + vv, ok := v.(proto.Message) + if !ok { + return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v) + } + return proto.Marshal(vv) +} + +func (bytesCodec) Unmarshal(data []byte, v any) error { + switch v := v.(type) { + case *[]byte: + // If gRPC could recycle the data []byte after unmarshaling (through + // buffer pools), we would need to make a copy here. + *v = data + return nil + case proto.Message: + return proto.Unmarshal(data, v) + default: + return fmt.Errorf("can not unmarshal type %T", v) + } +} + +func (bytesCodec) Name() string { + // If this isn't "", then gRPC sets the content-subtype of the call to this + // value and we get errors. + return "" +} + func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRangeReaderParams, opts ...storageOption) (r *Reader, err error) { ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.grpcStorageClient.NewRangeReader") defer func() { trace.EndSpan(ctx, err) }() s := callSettings(c.settings, opts...) + s.gax = append(s.gax, gax.WithGRPCOptions( + grpc.ForceCodec(bytesCodec{}), + )) + if s.userProject != "" { ctx = setUserProjectMetadata(ctx, s.userProject) } @@ -918,6 +965,8 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange req.Generation = params.gen } + var databuf []byte + // Define a function that initiates a Read with offset and length, assuming // we have already read seen bytes. 
reopen := func(seen int64) (*readStreamResponse, context.CancelFunc, error) { @@ -952,12 +1001,23 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange return err } - msg, err = stream.Recv() + // Receive the message into databuf as a wire-encoded message so we can + // use a custom decoder to avoid an extra copy at the protobuf layer. + err := stream.RecvMsg(&databuf) // These types of errors show up on the Recv call, rather than the // initialization of the stream via ReadObject above. if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound { return ErrObjectNotExist } + if err != nil { + return err + } + // Use a custom decoder that uses protobuf unmarshalling for all + // fields except the checksummed data. + // Subsequent receives in Read calls will skip all protobuf + // unmarshalling and directly read the content from the gRPC []byte + // response, since only the first call will contain other fields. + msg, err = readFullObjectResponse(databuf) return err }, s.retry, s.idempotent) @@ -983,6 +1043,16 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange // This is the size of the entire object, even if only a range was requested. size := obj.GetSize() + // Only support checksums when reading an entire object, not a range. + var ( + wantCRC uint32 + checkCRC bool + ) + if checksums := msg.GetObjectChecksums(); checksums != nil && checksums.Crc32C != nil && params.offset == 0 && params.length < 0 { + wantCRC = checksums.GetCrc32C() + checkCRC = true + } + r = &Reader{ Attrs: ReaderObjectAttrs{ Size: size, @@ -1003,7 +1073,11 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange leftovers: msg.GetChecksummedData().GetContent(), settings: s, zeroRange: params.length == 0, + databuf: databuf, + wantCRC: wantCRC, + checkCRC: checkCRC, }, + checkCRC: checkCRC, } cr := msg.GetContentRange() @@ -1021,12 +1095,6 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange r.reader.Close() } - // Only support checksums when reading an entire object, not a range. - if checksums := msg.GetObjectChecksums(); checksums != nil && checksums.Crc32C != nil && params.offset == 0 && params.length < 0 { - r.wantCRC = checksums.GetCrc32C() - r.checkCRC = true - } - return r, nil } @@ -1401,14 +1469,37 @@ type gRPCReader struct { stream storagepb.Storage_ReadObjectClient reopen func(seen int64) (*readStreamResponse, context.CancelFunc, error) leftovers []byte + databuf []byte cancel context.CancelFunc settings *settings + checkCRC bool // should we check the CRC? + wantCRC uint32 // the CRC32c value the server sent in the header + gotCRC uint32 // running crc +} + +// Update the running CRC with the data in the slice, if CRC checking was enabled. +func (r *gRPCReader) updateCRC(b []byte) { + if r.checkCRC { + r.gotCRC = crc32.Update(r.gotCRC, crc32cTable, b) + } +} + +// Checks whether the CRC matches at the conclusion of a read, if CRC checking was enabled. +func (r *gRPCReader) runCRCCheck() error { + if r.checkCRC && r.gotCRC != r.wantCRC { + return fmt.Errorf("storage: bad CRC on read: got %d, want %d", r.gotCRC, r.wantCRC) + } + return nil } // Read reads bytes into the user's buffer from an open gRPC stream. func (r *gRPCReader) Read(p []byte) (int, error) { - // The entire object has been read by this reader, return EOF. + // The entire object has been read by this reader, check the checksum if + // necessary and return EOF. 
if r.size == r.seen || r.zeroRange { + if err := r.runCRCCheck(); err != nil { + return 0, err + } return 0, io.EOF } @@ -1417,7 +1508,7 @@ func (r *gRPCReader) Read(p []byte) (int, error) { // using the same reader. One encounters an error and the stream is closed // and then reopened while the other routine attempts to read from it. if r.stream == nil { - return 0, fmt.Errorf("reader has been closed") + return 0, fmt.Errorf("storage: reader has been closed") } var n int @@ -1426,12 +1517,13 @@ func (r *gRPCReader) Read(p []byte) (int, error) { if len(r.leftovers) > 0 { n = copy(p, r.leftovers) r.seen += int64(n) + r.updateCRC(p[:n]) r.leftovers = r.leftovers[n:] return n, nil } // Attempt to Recv the next message on the stream. - msg, err := r.recv() + content, err := r.recv() if err != nil { return 0, err } @@ -1443,7 +1535,6 @@ func (r *gRPCReader) Read(p []byte) (int, error) { // present in the response here. // TODO: Figure out if we need to support decompressive transcoding // https://cloud.google.com/storage/docs/transcoding. - content := msg.GetChecksummedData().GetContent() n = copy(p[n:], content) leftover := len(content) - n if leftover > 0 { @@ -1452,10 +1543,78 @@ func (r *gRPCReader) Read(p []byte) (int, error) { r.leftovers = content[n:] } r.seen += int64(n) + r.updateCRC(p[:n]) return n, nil } +// WriteTo writes all the data requested by the Reader into w, implementing +// io.WriterTo. +func (r *gRPCReader) WriteTo(w io.Writer) (int64, error) { + // The entire object has been read by this reader, check the checksum if + // necessary and return nil. + if r.size == r.seen || r.zeroRange { + if err := r.runCRCCheck(); err != nil { + return 0, err + } + return 0, nil + } + + // No stream to read from, either never initialized or Close was called. + // Note: There is a potential concurrency issue if multiple routines are + // using the same reader. One encounters an error and the stream is closed + // and then reopened while the other routine attempts to read from it. + if r.stream == nil { + return 0, fmt.Errorf("storage: reader has been closed") + } + + // Track bytes written during before call. + var alreadySeen = r.seen + + // Write any leftovers to the stream. There will be some leftovers from the + // original NewRangeReader call. + if len(r.leftovers) > 0 { + // Write() will write the entire leftovers slice unless there is an error. + written, err := w.Write(r.leftovers) + r.seen += int64(written) + r.updateCRC(r.leftovers) + r.leftovers = nil + if err != nil { + return r.seen - alreadySeen, err + } + } + + // Loop and receive additional messages until the entire data is written. + for { + // Attempt to receive the next message on the stream. + // Will terminate with io.EOF once data has all come through. + // recv() handles stream reopening and retry logic so no need for retries here. + msg, err := r.recv() + if err != nil { + if err == io.EOF { + // We are done; check the checksum if necessary and return. + err = r.runCRCCheck() + } + return r.seen - alreadySeen, err + } + + // TODO: Determine if we need to capture incremental CRC32C for this + // chunk. The Object CRC32C checksum is captured when directed to read + // the entire Object. If directed to read a range, we may need to + // calculate the range's checksum for verification if the checksum is + // present in the response here. + // TODO: Figure out if we need to support decompressive transcoding + // https://cloud.google.com/storage/docs/transcoding. 
+ written, err := w.Write(msg) + r.seen += int64(written) + r.updateCRC(msg) + if err != nil { + return r.seen - alreadySeen, err + } + } + +} + // Close cancels the read stream's context in order for it to be closed and // collected. func (r *gRPCReader) Close() error { @@ -1466,9 +1625,10 @@ func (r *gRPCReader) Close() error { return nil } -// recv attempts to Recv the next message on the stream. In the event -// that a retryable error is encountered, the stream will be closed, reopened, -// and Recv again. This will attempt to Recv until one of the following is true: +// recv attempts to Recv the next message on the stream and extract the object +// data that it contains. In the event that a retryable error is encountered, +// the stream will be closed, reopened, and RecvMsg again. +// This will attempt to Recv until one of the following is true: // // * Recv is successful // * A non-retryable error is encountered @@ -1476,8 +1636,9 @@ func (r *gRPCReader) Close() error { // // The last error received is the one that is returned, which could be from // an attempt to reopen the stream. -func (r *gRPCReader) recv() (*storagepb.ReadObjectResponse, error) { - msg, err := r.stream.Recv() +func (r *gRPCReader) recv() ([]byte, error) { + err := r.stream.RecvMsg(&r.databuf) + var shouldRetry = ShouldRetry if r.settings.retry != nil && r.settings.retry.shouldRetry != nil { shouldRetry = r.settings.retry.shouldRetry @@ -1487,10 +1648,195 @@ func (r *gRPCReader) recv() (*storagepb.ReadObjectResponse, error) { // reopen the stream, but will backoff if further attempts are necessary. // Reopening the stream Recvs the first message, so if retrying is // successful, the next logical chunk will be returned. - msg, err = r.reopenStream() + msg, err := r.reopenStream() + return msg.GetChecksummedData().GetContent(), err + } + + if err != nil { + return nil, err + } + + return readObjectResponseContent(r.databuf) +} + +// ReadObjectResponse field and subfield numbers. +const ( + checksummedDataField = protowire.Number(1) + checksummedDataContentField = protowire.Number(1) + checksummedDataCRC32CField = protowire.Number(2) + objectChecksumsField = protowire.Number(2) + contentRangeField = protowire.Number(3) + metadataField = protowire.Number(4) +) + +// readObjectResponseContent returns the checksummed_data.content field of a +// ReadObjectResponse message, or an error if the message is invalid. +// This can be used on recvs of objects after the first recv, since only the +// first message will contain non-data fields. +func readObjectResponseContent(b []byte) ([]byte, error) { + checksummedData, err := readProtoBytes(b, checksummedDataField) + if err != nil { + return b, fmt.Errorf("invalid ReadObjectResponse.ChecksummedData: %v", err) + } + content, err := readProtoBytes(checksummedData, checksummedDataContentField) + if err != nil { + return content, fmt.Errorf("invalid ReadObjectResponse.ChecksummedData.Content: %v", err) } - return msg, err + return content, nil +} + +// readFullObjectResponse returns the ReadObjectResponse that is encoded in the +// wire-encoded message buffer b, or an error if the message is invalid. +// This must be used on the first recv of an object as it may contain all fields +// of ReadObjectResponse, and we use or pass on those fields to the user. +// This function is essentially identical to proto.Unmarshal, except it aliases +// the data in the input []byte. If the proto library adds a feature to +// Unmarshal that does that, this function can be dropped. 
+func readFullObjectResponse(b []byte) (*storagepb.ReadObjectResponse, error) { + msg := &storagepb.ReadObjectResponse{} + + // Loop over the entire message, extracting fields as we go. This does not + // handle field concatenation, in which the contents of a single field + // are split across multiple protobuf tags. + off := 0 + for off < len(b) { + // Consume the next tag. This will tell us which field is next in the + // buffer, its type, and how much space it takes up. + fieldNum, fieldType, fieldLength := protowire.ConsumeTag(b[off:]) + if fieldLength < 0 { + return nil, protowire.ParseError(fieldLength) + } + off += fieldLength + + // Unmarshal the field according to its type. Only fields that are not + // nil will be present. + switch { + case fieldNum == checksummedDataField && fieldType == protowire.BytesType: + // The ChecksummedData field was found. Initialize the struct. + msg.ChecksummedData = &storagepb.ChecksummedData{} + + // Get the bytes corresponding to the checksummed data. + fieldContent, n := protowire.ConsumeBytes(b[off:]) + if n < 0 { + return nil, fmt.Errorf("invalid ReadObjectResponse.ChecksummedData: %v", protowire.ParseError(n)) + } + off += n + + // Get the nested fields. We need to do this manually as it contains + // the object content bytes. + contentOff := 0 + for contentOff < len(fieldContent) { + gotNum, gotTyp, n := protowire.ConsumeTag(fieldContent[contentOff:]) + if n < 0 { + return nil, protowire.ParseError(n) + } + contentOff += n + + switch { + case gotNum == checksummedDataContentField && gotTyp == protowire.BytesType: + // Get the content bytes. + bytes, n := protowire.ConsumeBytes(fieldContent[contentOff:]) + if n < 0 { + return nil, fmt.Errorf("invalid ReadObjectResponse.ChecksummedData.Content: %v", protowire.ParseError(n)) + } + msg.ChecksummedData.Content = bytes + contentOff += n + case gotNum == checksummedDataCRC32CField && gotTyp == protowire.Fixed32Type: + v, n := protowire.ConsumeFixed32(fieldContent[contentOff:]) + if n < 0 { + return nil, fmt.Errorf("invalid ReadObjectResponse.ChecksummedData.Crc32C: %v", protowire.ParseError(n)) + } + msg.ChecksummedData.Crc32C = &v + contentOff += n + default: + n = protowire.ConsumeFieldValue(gotNum, gotTyp, fieldContent[contentOff:]) + if n < 0 { + return nil, protowire.ParseError(n) + } + contentOff += n + } + } + case fieldNum == objectChecksumsField && fieldType == protowire.BytesType: + // The field was found. Initialize the struct. + msg.ObjectChecksums = &storagepb.ObjectChecksums{} + + // Get the bytes corresponding to the checksums. + bytes, n := protowire.ConsumeBytes(b[off:]) + if n < 0 { + return nil, fmt.Errorf("invalid ReadObjectResponse.ObjectChecksums: %v", protowire.ParseError(n)) + } + off += n + + // Unmarshal. 
+ if err := proto.Unmarshal(bytes, msg.ObjectChecksums); err != nil { + return nil, err + } + case fieldNum == contentRangeField && fieldType == protowire.BytesType: + msg.ContentRange = &storagepb.ContentRange{} + + bytes, n := protowire.ConsumeBytes(b[off:]) + if n < 0 { + return nil, fmt.Errorf("invalid ReadObjectResponse.ContentRange: %v", protowire.ParseError(n)) + } + off += n + + if err := proto.Unmarshal(bytes, msg.ContentRange); err != nil { + return nil, err + } + case fieldNum == metadataField && fieldType == protowire.BytesType: + msg.Metadata = &storagepb.Object{} + + bytes, n := protowire.ConsumeBytes(b[off:]) + if n < 0 { + return nil, fmt.Errorf("invalid ReadObjectResponse.Metadata: %v", protowire.ParseError(n)) + } + off += n + + if err := proto.Unmarshal(bytes, msg.Metadata); err != nil { + return nil, err + } + default: + fieldLength = protowire.ConsumeFieldValue(fieldNum, fieldType, b[off:]) + if fieldLength < 0 { + return nil, fmt.Errorf("default: %v", protowire.ParseError(fieldLength)) + } + off += fieldLength + } + } + + return msg, nil +} + +// readProtoBytes returns the contents of the protobuf field with number num +// and type bytes from a wire-encoded message. If the field cannot be found, +// the returned slice will be nil and no error will be returned. +// +// It does not handle field concatenation, in which the contents of a single field +// are split across multiple protobuf tags. Encoded data containing split fields +// of this form is technically permissable, but uncommon. +func readProtoBytes(b []byte, num protowire.Number) ([]byte, error) { + off := 0 + for off < len(b) { + gotNum, gotTyp, n := protowire.ConsumeTag(b[off:]) + if n < 0 { + return nil, protowire.ParseError(n) + } + off += n + if gotNum == num && gotTyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b[off:]) + if n < 0 { + return nil, protowire.ParseError(n) + } + return b, nil + } + n = protowire.ConsumeFieldValue(gotNum, gotTyp, b[off:]) + if n < 0 { + return nil, protowire.ParseError(n) + } + off += n + } + return nil, nil } // reopenStream "closes" the existing stream and attempts to reopen a stream and @@ -1524,16 +1870,17 @@ func newGRPCWriter(c *grpcStorageClient, params *openWriterParams, r io.Reader) } return &gRPCWriter{ - buf: make([]byte, size), - c: c, - ctx: params.ctx, - reader: r, - bucket: params.bucket, - attrs: params.attrs, - conds: params.conds, - encryptionKey: params.encryptionKey, - sendCRC32C: params.sendCRC32C, - chunkSize: params.chunkSize, + buf: make([]byte, size), + c: c, + ctx: params.ctx, + reader: r, + bucket: params.bucket, + attrs: params.attrs, + conds: params.conds, + encryptionKey: params.encryptionKey, + sendCRC32C: params.sendCRC32C, + chunkSize: params.chunkSize, + forceEmptyContentType: params.forceEmptyContentType, } } @@ -1552,8 +1899,9 @@ type gRPCWriter struct { encryptionKey []byte settings *settings - sendCRC32C bool - chunkSize int + sendCRC32C bool + chunkSize int + forceEmptyContentType bool // The gRPC client-stream used for sending buffers. stream storagepb.Storage_BidiWriteObjectClient @@ -1623,6 +1971,7 @@ func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*st // Send a request with as many bytes as possible. // Loop until all bytes are sent. 
+sendBytes: // label this loop so that we can use a continue statement from a nested block for { bytesNotYetSent := recvd - sent remainingDataFitsInSingleReq := bytesNotYetSent <= maxPerMessageWriteSize @@ -1700,10 +2049,6 @@ func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*st // we retry. w.stream = nil - // Drop the stream reference as a new one will need to be created if - // we can retry the upload - w.stream = nil - // Retriable errors mean we should start over and attempt to // resend the entire buffer via a new stream. // If not retriable, falling through will return the error received. @@ -1717,7 +2062,7 @@ func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*st // Continue sending requests, opening a new stream and resending // any bytes not yet persisted as per QueryWriteStatus - continue + continue sendBytes } } if err != nil { @@ -1732,7 +2077,7 @@ func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*st // Not done sending data, do not attempt to commit it yet, loop around // and send more data. if recvd-sent > 0 { - continue + continue sendBytes } // The buffer has been uploaded and there is still more data to be @@ -1763,7 +2108,7 @@ func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*st // Drop the stream reference as a new one will need to be created. w.stream = nil - continue + continue sendBytes } if err != nil { return nil, 0, err @@ -1773,7 +2118,7 @@ func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*st // Retry if not all bytes were persisted. writeOffset = resp.GetPersistedSize() sent = int(writeOffset) - int(start) - continue + continue sendBytes } } else { // If the object is done uploading, close the send stream to signal @@ -1793,6 +2138,15 @@ func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*st var obj *storagepb.Object for obj == nil { resp, err := w.stream.Recv() + if shouldRetry(err) { + writeOffset, err = w.determineOffset(start) + if err != nil { + return nil, 0, err + } + sent = int(writeOffset) - int(start) + w.stream = nil + continue sendBytes + } if err != nil { return nil, 0, err } @@ -1852,9 +2206,9 @@ func (w *gRPCWriter) writeObjectSpec() (*storagepb.WriteObjectSpec, error) { // read copies the data in the reader to the given buffer and reports how much // data was read into the buffer and if there is no more data to read (EOF). // Furthermore, if the attrs.ContentType is unset, the first bytes of content -// will be sniffed for a matching content type. +// will be sniffed for a matching content type unless forceEmptyContentType is enabled. func (w *gRPCWriter) read() (int, bool, error) { - if w.attrs.ContentType == "" { + if w.attrs.ContentType == "" && !w.forceEmptyContentType { w.reader, w.attrs.ContentType = gax.DetermineContentType(w.reader) } // Set n to -1 to start the Read loop. diff --git a/vendor/cloud.google.com/go/storage/http_client.go b/vendor/cloud.google.com/go/storage/http_client.go index 0e157e4ba994f..f75d93897d9d5 100644 --- a/vendor/cloud.google.com/go/storage/http_client.go +++ b/vendor/cloud.google.com/go/storage/http_client.go @@ -19,6 +19,7 @@ import ( "encoding/base64" "errors" "fmt" + "hash/crc32" "io" "io/ioutil" "net/http" @@ -74,9 +75,10 @@ func newHTTPStorageClient(ctx context.Context, opts ...storageOption) (storageCl // Prepend default options to avoid overriding options passed by the user. 
o = append([]option.ClientOption{option.WithScopes(ScopeFullControl, "https://www.googleapis.com/auth/cloud-platform"), option.WithUserAgent(userAgent)}, o...) - o = append(o, internaloption.WithDefaultEndpoint("https://storage.googleapis.com/storage/v1/")) - o = append(o, internaloption.WithDefaultMTLSEndpoint("https://storage.mtls.googleapis.com/storage/v1/")) - + o = append(o, internaloption.WithDefaultEndpointTemplate("https://storage.UNIVERSE_DOMAIN/storage/v1/"), + internaloption.WithDefaultMTLSEndpoint("https://storage.mtls.googleapis.com/storage/v1/"), + internaloption.WithDefaultUniverseDomain("googleapis.com"), + ) // Don't error out here. The user may have passed in their own HTTP // client which does not auth with ADC or other common conventions. c, err := transport.Creds(ctx, o...) @@ -348,6 +350,7 @@ func (c *httpStorageClient) ListObjects(ctx context.Context, bucket string, q *Q req.Versions(it.query.Versions) req.IncludeTrailingDelimiter(it.query.IncludeTrailingDelimiter) req.MatchGlob(it.query.MatchGlob) + req.IncludeFoldersAsPrefixes(it.query.IncludeFoldersAsPrefixes) if selection := it.query.toFieldSelection(); selection != "" { req.Fields("nextPageToken", googleapi.Field(selection)) } @@ -883,7 +886,7 @@ func (c *httpStorageClient) OpenWriter(params *openWriterParams, opts ...storage mediaOpts := []googleapi.MediaOption{ googleapi.ChunkSize(params.chunkSize), } - if c := attrs.ContentType; c != "" { + if c := attrs.ContentType; c != "" || params.forceEmptyContentType { mediaOpts = append(mediaOpts, googleapi.ContentType(c)) } if params.chunkRetryDeadline != 0 { @@ -1216,9 +1219,12 @@ func (c *httpStorageClient) DeleteNotification(ctx context.Context, bucket strin } type httpReader struct { - body io.ReadCloser - seen int64 - reopen func(seen int64) (*http.Response, error) + body io.ReadCloser + seen int64 + reopen func(seen int64) (*http.Response, error) + checkCRC bool // should we check the CRC? + wantCRC uint32 // the CRC32c value the server sent in the header + gotCRC uint32 // running crc } func (r *httpReader) Read(p []byte) (int, error) { @@ -1227,7 +1233,22 @@ func (r *httpReader) Read(p []byte) (int, error) { m, err := r.body.Read(p[n:]) n += m r.seen += int64(m) - if err == nil || err == io.EOF { + if r.checkCRC { + r.gotCRC = crc32.Update(r.gotCRC, crc32cTable, p[:n]) + } + if err == nil { + return n, nil + } + if err == io.EOF { + // Check CRC here. It would be natural to check it in Close, but + // everybody defers Close on the assumption that it doesn't return + // anything worth looking at. 
+ if r.checkCRC { + if r.gotCRC != r.wantCRC { + return n, fmt.Errorf("storage: bad CRC on read: got %d, want %d", + r.gotCRC, r.wantCRC) + } + } return n, err } // Read failed (likely due to connection issues), but we will try to reopen @@ -1433,11 +1454,12 @@ func parseReadResponse(res *http.Response, params *newRangeReaderParams, reopen Attrs: attrs, size: size, remain: remain, - wantCRC: crc, checkCRC: checkCRC, reader: &httpReader{ - reopen: reopen, - body: body, + reopen: reopen, + body: body, + wantCRC: crc, + checkCRC: checkCRC, }, }, nil } diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary.go b/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary.go index c6fd4b341ffa3..415b2b585b503 100644 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary.go +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go b/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go index 8159589ea9be6..5e2a8f0ad5bec 100644 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -18,9 +18,11 @@ // Cloud Storage API. // // Stop. This folder is likely not what you are looking for. This folder -// contains protocol buffer definitions for an unreleased API for accessing -// Cloud Storage. Unless told otherwise by a Google Cloud representative, do -// not use any of the contents of this folder. If you would like to use Cloud +// contains protocol buffer definitions for an API only accessible to select +// customers. Customers not participating should not depend on this file. +// Please contact Google Cloud sales if you are interested. Unless told +// otherwise by a Google Cloud representative, do not use or otherwise rely +// on any of the contents of this folder. If you would like to use Cloud // Storage, please consult our official documentation (at // https://cloud.google.com/storage/docs/apis) for details on our XML and // JSON APIs, or else consider one of our client libraries (at diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go b/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go index 64819950600cc..47300d7a10fea 100644 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -78,7 +78,9 @@ type CallOptions struct { func defaultGRPCClientOptions() []option.ClientOption { return []option.ClientOption{ internaloption.WithDefaultEndpoint("storage.googleapis.com:443"), + internaloption.WithDefaultEndpointTemplate("storage.UNIVERSE_DOMAIN:443"), internaloption.WithDefaultMTLSEndpoint("storage.mtls.googleapis.com:443"), + internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://storage.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), internaloption.EnableJwtWithScope(), @@ -625,18 +627,16 @@ func (c *Client) LockBucketRetentionPolicy(ctx context.Context, req *storagepb.L return c.internalClient.LockBucketRetentionPolicy(ctx, req, opts...) } -// GetIamPolicy gets the IAM policy for a specified bucket or object. +// GetIamPolicy gets the IAM policy for a specified bucket. // The resource field in the request should be -// projects/_/buckets/{bucket} for a bucket or -// projects/_/buckets/{bucket}/objects/{object} for an object. +// projects/_/buckets/{bucket}. func (c *Client) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { return c.internalClient.GetIamPolicy(ctx, req, opts...) } -// SetIamPolicy updates an IAM policy for the specified bucket or object. +// SetIamPolicy updates an IAM policy for the specified bucket. // The resource field in the request should be -// projects/_/buckets/{bucket} for a bucket or -// projects/_/buckets/{bucket}/objects/{object} for an object. +// projects/_/buckets/{bucket}. func (c *Client) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { return c.internalClient.SetIamPolicy(ctx, req, opts...) 
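The reworded GetIamPolicy/SetIamPolicy comments narrow IAM to buckets only: the resource field must be projects/_/buckets/{bucket}, and the object-level routing (removed in the hunks below) no longer applies. A hedged sketch of reading a bucket policy through the public surface of cloud.google.com/go/storage rather than this generated gRPC client; the bucket name is a placeholder:

package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// IAM policies are read and written per bucket; there is no per-object
	// policy resource ("my-bucket" is a placeholder name).
	policy, err := client.Bucket("my-bucket").IAM().Policy(ctx)
	if err != nil {
		log.Fatal(err)
	}
	for _, role := range policy.Roles() {
		fmt.Println(role, policy.Members(role))
	}
}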
} @@ -1139,9 +1139,6 @@ func (c *gRPCClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRe if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 { routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1]) } - if reg := regexp.MustCompile("(?Pprojects/[^/]+/buckets/[^/]+)/objects(?:/.*)?"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 { - routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1]) - } for headerName, headerValue := range routingHeadersMap { routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } @@ -1169,9 +1166,6 @@ func (c *gRPCClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRe if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 { routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1]) } - if reg := regexp.MustCompile("(?Pprojects/[^/]+/buckets/[^/]+)/objects(?:/.*)?"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 { - routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1]) - } for headerName, headerValue := range routingHeadersMap { routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go b/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go index 3486fd1533ec7..9637bc0a5b66d 100644 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.31.0 -// protoc v4.23.2 +// protoc-gen-go v1.32.0 +// protoc v4.25.2 // source: google/storage/v2/storage.proto package storagepb @@ -2016,6 +2016,7 @@ type WriteObjectRequest struct { // The first message of each stream should set one of the following. // // Types that are assignable to FirstMessage: + // // *WriteObjectRequest_UploadId // *WriteObjectRequest_WriteObjectSpec FirstMessage isWriteObjectRequest_FirstMessage `protobuf_oneof:"first_message"` @@ -2036,6 +2037,7 @@ type WriteObjectRequest struct { // A portion of the data for the object. // // Types that are assignable to Data: + // // *WriteObjectRequest_ChecksummedData Data isWriteObjectRequest_Data `protobuf_oneof:"data"` // Checksums for the complete object. If the checksums computed by the service @@ -2190,6 +2192,7 @@ type WriteObjectResponse struct { // The response will set one of the following. // // Types that are assignable to WriteStatus: + // // *WriteObjectResponse_PersistedSize // *WriteObjectResponse_Resource WriteStatus isWriteObjectResponse_WriteStatus `protobuf_oneof:"write_status"` @@ -2277,6 +2280,7 @@ type BidiWriteObjectRequest struct { // The first message of each stream should set one of the following. 
// // Types that are assignable to FirstMessage: + // // *BidiWriteObjectRequest_UploadId // *BidiWriteObjectRequest_WriteObjectSpec FirstMessage isBidiWriteObjectRequest_FirstMessage `protobuf_oneof:"first_message"` @@ -2297,6 +2301,7 @@ type BidiWriteObjectRequest struct { // A portion of the data for the object. // // Types that are assignable to Data: + // // *BidiWriteObjectRequest_ChecksummedData Data isBidiWriteObjectRequest_Data `protobuf_oneof:"data"` // Checksums for the complete object. If the checksums computed by the service @@ -2310,12 +2315,15 @@ type BidiWriteObjectRequest struct { // covers all the bytes the server has persisted thus far and can be used to // decide what data is safe for the client to drop. Note that the object's // current size reported by the BidiWriteObjectResponse may lag behind the - // number of bytes written by the client. + // number of bytes written by the client. This field is ignored if + // `finish_write` is set to true. StateLookup bool `protobuf:"varint,7,opt,name=state_lookup,json=stateLookup,proto3" json:"state_lookup,omitempty"` // Persists data written on the stream, up to and including the current // message, to permanent storage. This option should be used sparingly as it // may reduce performance. Ongoing writes will periodically be persisted on - // the server even when `flush` is not set. + // the server even when `flush` is not set. This field is ignored if + // `finish_write` is set to true since there's no need to checkpoint or flush + // if this message completes the write. Flush bool `protobuf:"varint,8,opt,name=flush,proto3" json:"flush,omitempty"` // If `true`, this indicates that the write is complete. Sending any // `WriteObjectRequest`s subsequent to one in which `finish_write` is `true` @@ -2478,6 +2486,7 @@ type BidiWriteObjectResponse struct { // The response will set one of the following. // // Types that are assignable to WriteStatus: + // // *BidiWriteObjectResponse_PersistedSize // *BidiWriteObjectResponse_Resource WriteStatus isBidiWriteObjectResponse_WriteStatus `protobuf_oneof:"write_status"` @@ -2608,6 +2617,9 @@ type ListObjectsRequest struct { // Optional. If true, only list all soft-deleted versions of the object. // Soft delete policy is required to set this option. SoftDeleted bool `protobuf:"varint,12,opt,name=soft_deleted,json=softDeleted,proto3" json:"soft_deleted,omitempty"` + // Optional. If true, will also include folders and managed folders (besides + // objects) in the returned `prefixes`. Requires `delimiter` to be set to '/'. + IncludeFoldersAsPrefixes bool `protobuf:"varint,13,opt,name=include_folders_as_prefixes,json=includeFoldersAsPrefixes,proto3" json:"include_folders_as_prefixes,omitempty"` // Optional. Filter results to objects and prefixes that match this glob // pattern. See [List Objects Using // Glob](https://cloud.google.com/storage/docs/json_api/v1/objects/list#list-objects-and-prefixes-using-glob) @@ -2724,6 +2736,13 @@ func (x *ListObjectsRequest) GetSoftDeleted() bool { return false } +func (x *ListObjectsRequest) GetIncludeFoldersAsPrefixes() bool { + if x != nil { + return x.IncludeFoldersAsPrefixes + } + return false +} + func (x *ListObjectsRequest) GetMatchGlob() string { if x != nil { return x.MatchGlob @@ -2799,6 +2818,7 @@ type QueryWriteStatusResponse struct { // The response will set one of the following. 
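ListObjectsRequest gains include_folders_as_prefixes, which folds folders and managed folders into the returned prefixes and only takes effect when the delimiter is "/"; the earlier httpStorageClient hunk wires it through as req.IncludeFoldersAsPrefixes(it.query.IncludeFoldersAsPrefixes). A hedged sketch of a delimiter listing that uses the corresponding storage.Query field from this vendored version; bucket and prefix names are placeholders:

package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/storage"
	"google.golang.org/api/iterator"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// IncludeFoldersAsPrefixes only applies when Delimiter is "/", per the
	// field documentation in this patch.
	q := &storage.Query{
		Prefix:                   "logs/",
		Delimiter:                "/",
		IncludeFoldersAsPrefixes: true,
	}
	it := client.Bucket("my-bucket").Objects(ctx, q)
	for {
		attrs, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		if attrs.Prefix != "" {
			fmt.Println("prefix:", attrs.Prefix) // folders and managed folders land here
			continue
		}
		fmt.Println("object:", attrs.Name)
	}
}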
// // Types that are assignable to WriteStatus: + // // *QueryWriteStatusResponse_PersistedSize // *QueryWriteStatusResponse_Resource WriteStatus isQueryWriteStatusResponse_WriteStatus `protobuf_oneof:"write_status"` @@ -4253,6 +4273,10 @@ type Bucket struct { // The bucket's Autoclass configuration. If there is no configuration, the // Autoclass feature will be disabled and have no effect on the bucket. Autoclass *Bucket_Autoclass `protobuf:"bytes,28,opt,name=autoclass,proto3" json:"autoclass,omitempty"` + // Optional. The bucket's hierarchical namespace configuration. If there is no + // configuration, the hierarchical namespace feature will be disabled and have + // no effect on the bucket. + HierarchicalNamespace *Bucket_HierarchicalNamespace `protobuf:"bytes,32,opt,name=hierarchical_namespace,json=hierarchicalNamespace,proto3" json:"hierarchical_namespace,omitempty"` // Optional. The bucket's soft delete policy. The soft delete policy prevents // soft-deleted objects from being permanently deleted. SoftDeletePolicy *Bucket_SoftDeletePolicy `protobuf:"bytes,31,opt,name=soft_delete_policy,json=softDeletePolicy,proto3" json:"soft_delete_policy,omitempty"` @@ -4486,6 +4510,13 @@ func (x *Bucket) GetAutoclass() *Bucket_Autoclass { return nil } +func (x *Bucket) GetHierarchicalNamespace() *Bucket_HierarchicalNamespace { + if x != nil { + return x.HierarchicalNamespace + } + return nil +} + func (x *Bucket) GetSoftDeletePolicy() *Bucket_SoftDeletePolicy { if x != nil { return x.SoftDeletePolicy @@ -5168,6 +5199,16 @@ type Object struct { CustomerEncryption *CustomerEncryption `protobuf:"bytes,25,opt,name=customer_encryption,json=customerEncryption,proto3" json:"customer_encryption,omitempty"` // A user-specified timestamp set on an object. CustomTime *timestamppb.Timestamp `protobuf:"bytes,26,opt,name=custom_time,json=customTime,proto3" json:"custom_time,omitempty"` + // Output only. This is the time when the object became soft-deleted. + // + // Soft-deleted objects are only accessible if a soft_delete_policy is + // enabled. Also see hard_delete_time. + SoftDeleteTime *timestamppb.Timestamp `protobuf:"bytes,28,opt,name=soft_delete_time,json=softDeleteTime,proto3,oneof" json:"soft_delete_time,omitempty"` + // Output only. The time when the object will be permanently deleted. + // + // Only set when an object becomes soft-deleted with a soft_delete_policy. + // Otherwise, the object will not be accessible. + HardDeleteTime *timestamppb.Timestamp `protobuf:"bytes,29,opt,name=hard_delete_time,json=hardDeleteTime,proto3,oneof" json:"hard_delete_time,omitempty"` } func (x *Object) Reset() { @@ -5391,6 +5432,20 @@ func (x *Object) GetCustomTime() *timestamppb.Timestamp { return nil } +func (x *Object) GetSoftDeleteTime() *timestamppb.Timestamp { + if x != nil { + return x.SoftDeleteTime + } + return nil +} + +func (x *Object) GetHardDeleteTime() *timestamppb.Timestamp { + if x != nil { + return x.HardDeleteTime + } + return nil +} + // An access-control entry. type ObjectAccessControl struct { state protoimpl.MessageState @@ -5775,9 +5830,9 @@ type ContentRange struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // The starting offset of the object data. + // The starting offset of the object data. This value is inclusive. Start int64 `protobuf:"varint,1,opt,name=start,proto3" json:"start,omitempty"` - // The ending offset of the object data. + // The ending offset of the object data. This value is exclusive. 
End int64 `protobuf:"varint,2,opt,name=end,proto3" json:"end,omitempty"` // The complete length of the object data. CompleteLength int64 `protobuf:"varint,3,opt,name=complete_length,json=completeLength,proto3" json:"complete_length,omitempty"` @@ -6693,6 +6748,55 @@ func (x *Bucket_Autoclass) GetTerminalStorageClassUpdateTime() *timestamppb.Time return nil } +// Configuration for a bucket's hierarchical namespace feature. +type Bucket_HierarchicalNamespace struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Optional. Enables the hierarchical namespace feature. + Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` +} + +func (x *Bucket_HierarchicalNamespace) Reset() { + *x = Bucket_HierarchicalNamespace{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[71] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bucket_HierarchicalNamespace) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bucket_HierarchicalNamespace) ProtoMessage() {} + +func (x *Bucket_HierarchicalNamespace) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[71] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bucket_HierarchicalNamespace.ProtoReflect.Descriptor instead. +func (*Bucket_HierarchicalNamespace) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 12} +} + +func (x *Bucket_HierarchicalNamespace) GetEnabled() bool { + if x != nil { + return x.Enabled + } + return false +} + // Settings for Uniform Bucket level access. // See https://cloud.google.com/storage/docs/uniform-bucket-level-access. 
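Bucket gains a hierarchical_namespace message (Bucket_HierarchicalNamespace above, with a single Enabled flag). A hedged sketch of creating a bucket with that feature turned on, assuming the vendored release also exposes the high-level BucketAttrs.HierarchicalNamespace field that mirrors this proto message; project and bucket names are placeholders, and uniform bucket-level access is enabled alongside it as hierarchical-namespace buckets require it:

package main

import (
	"context"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Assumption: this vendored version exposes storage.HierarchicalNamespace
	// on BucketAttrs; otherwise this only illustrates the shape of the new field.
	attrs := &storage.BucketAttrs{
		HierarchicalNamespace:    &storage.HierarchicalNamespace{Enabled: true},
		UniformBucketLevelAccess: storage.UniformBucketLevelAccess{Enabled: true},
	}
	if err := client.Bucket("my-hns-bucket").Create(ctx, "my-project", attrs); err != nil {
		log.Fatal(err)
	}
}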
type Bucket_IamConfig_UniformBucketLevelAccess struct { @@ -6712,7 +6816,7 @@ type Bucket_IamConfig_UniformBucketLevelAccess struct { func (x *Bucket_IamConfig_UniformBucketLevelAccess) Reset() { *x = Bucket_IamConfig_UniformBucketLevelAccess{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[72] + mi := &file_google_storage_v2_storage_proto_msgTypes[73] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6725,7 +6829,7 @@ func (x *Bucket_IamConfig_UniformBucketLevelAccess) String() string { func (*Bucket_IamConfig_UniformBucketLevelAccess) ProtoMessage() {} func (x *Bucket_IamConfig_UniformBucketLevelAccess) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[72] + mi := &file_google_storage_v2_storage_proto_msgTypes[73] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6771,7 +6875,7 @@ type Bucket_Lifecycle_Rule struct { func (x *Bucket_Lifecycle_Rule) Reset() { *x = Bucket_Lifecycle_Rule{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[73] + mi := &file_google_storage_v2_storage_proto_msgTypes[74] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6784,7 +6888,7 @@ func (x *Bucket_Lifecycle_Rule) String() string { func (*Bucket_Lifecycle_Rule) ProtoMessage() {} func (x *Bucket_Lifecycle_Rule) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[73] + mi := &file_google_storage_v2_storage_proto_msgTypes[74] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6831,7 +6935,7 @@ type Bucket_Lifecycle_Rule_Action struct { func (x *Bucket_Lifecycle_Rule_Action) Reset() { *x = Bucket_Lifecycle_Rule_Action{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[74] + mi := &file_google_storage_v2_storage_proto_msgTypes[75] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6844,7 +6948,7 @@ func (x *Bucket_Lifecycle_Rule_Action) String() string { func (*Bucket_Lifecycle_Rule_Action) ProtoMessage() {} func (x *Bucket_Lifecycle_Rule_Action) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[74] + mi := &file_google_storage_v2_storage_proto_msgTypes[75] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6929,7 +7033,7 @@ type Bucket_Lifecycle_Rule_Condition struct { func (x *Bucket_Lifecycle_Rule_Condition) Reset() { *x = Bucket_Lifecycle_Rule_Condition{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[75] + mi := &file_google_storage_v2_storage_proto_msgTypes[76] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6942,7 +7046,7 @@ func (x *Bucket_Lifecycle_Rule_Condition) String() string { func (*Bucket_Lifecycle_Rule_Condition) ProtoMessage() {} func (x *Bucket_Lifecycle_Rule_Condition) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[75] + mi := &file_google_storage_v2_storage_proto_msgTypes[76] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -7602,7 +7706,7 @@ var file_google_storage_v2_storage_proto_rawDesc = []byte{ 0x67, 0x6c, 0x65, 
0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x22, 0x9f, 0x04, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x22, 0xe3, 0x04, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, @@ -7633,1201 +7737,1221 @@ var file_google_storage_v2_storage_proto_rawDesc = []byte{ 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x45, 0x6e, 0x64, 0x12, 0x26, 0x0a, 0x0c, 0x73, 0x6f, 0x66, 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0b, 0x73, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x64, 0x12, 0x22, 0x0a, 0x0a, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x67, 0x6c, 0x6f, 0x62, 0x18, - 0x0e, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x47, 0x6c, 0x6f, 0x62, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, - 0x61, 0x73, 0x6b, 0x22, 0xaa, 0x01, 0x0a, 0x17, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, - 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x20, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, - 0x64, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, + 0x64, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x66, 0x6f, 0x6c, + 0x64, 0x65, 0x72, 0x73, 0x5f, 0x61, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x65, 0x73, + 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x18, 0x69, 0x6e, 0x63, + 0x6c, 0x75, 0x64, 0x65, 0x46, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x41, 0x73, 0x50, 0x72, 0x65, + 0x66, 0x69, 0x78, 0x65, 0x73, 0x12, 0x22, 0x0a, 0x0a, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x67, + 0x6c, 0x6f, 0x62, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x47, 0x6c, 0x6f, 0x62, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, + 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0xaa, 0x01, 0x0a, 0x17, 0x51, 0x75, 0x65, 0x72, + 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x75, 0x70, 0x6c, + 0x6f, 0x61, 0x64, 0x49, 0x64, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, + 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 
0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, + 0x72, 0x61, 0x6d, 0x73, 0x22, 0x8c, 0x01, 0x0a, 0x18, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, + 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x27, 0x0a, 0x0e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73, + 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0d, 0x70, 0x65, 0x72, + 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x22, 0xb5, 0x0e, 0x0a, 0x14, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x31, 0x0a, 0x10, + 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x18, 0x20, 0x01, 0x28, 0x09, 0x42, 0x06, 0xe0, 0x41, 0x02, 0xe0, 0x41, 0x05, 0x52, 0x0f, + 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x57, 0x0a, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x19, 0x20, 0x01, 0x28, 0x09, 0x42, 0x28, 0xe0, 0x41, 0x02, + 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x11, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x56, 0x0a, 0x13, 0x64, 0x65, 0x73, 0x74, + 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, + 0x1b, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x11, 0x64, + 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x6d, 0x73, 0x4b, 0x65, 0x79, + 0x12, 0x3b, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4a, 0x0a, + 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x0c, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x28, 0x0a, 0x0d, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 
0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x12, 0x2b, 0x0a, 0x11, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, + 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, + 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x3c, 0x0a, 0x1a, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, + 0x61, 0x63, 0x6c, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x18, 0x64, 0x65, 0x73, 0x74, 0x69, + 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, + 0x41, 0x63, 0x6c, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, + 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, + 0x09, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, + 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x40, 0x0a, 0x1a, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, 0x52, 0x17, 0x69, 0x66, 0x53, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x47, 0x0a, 0x1e, 0x69, 0x66, 0x5f, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x03, 0x48, + 0x05, 0x52, 0x1a, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, + 0x12, 0x48, 0x0a, 0x1e, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x18, 0x0d, 
0x20, 0x01, 0x28, 0x03, 0x48, 0x06, 0x52, 0x1b, 0x69, 0x66, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x4f, 0x0a, 0x22, 0x69, 0x66, + 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x48, 0x07, 0x52, 0x1e, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3e, 0x0a, 0x1c, 0x6d, + 0x61, 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, + 0x65, 0x6e, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x18, 0x0f, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x18, 0x6d, 0x61, 0x78, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x77, 0x72, 0x69, + 0x74, 0x74, 0x65, 0x6e, 0x50, 0x65, 0x72, 0x43, 0x61, 0x6c, 0x6c, 0x12, 0x47, 0x0a, 0x20, 0x63, + 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, + 0x10, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1d, 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, + 0x69, 0x74, 0x68, 0x6d, 0x12, 0x46, 0x0a, 0x20, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, + 0x65, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x1c, + 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x53, 0x0a, 0x27, + 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, + 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x32, 0x35, + 0x36, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x22, 0x63, + 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65, + 0x73, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, - 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, - 0x22, 0x8c, 0x01, 0x0a, 0x18, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, - 0x0e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 
0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0d, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, - 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, - 0x0e, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, - 0xb5, 0x0e, 0x0a, 0x14, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x31, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x74, - 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x18, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x06, 0xe0, 0x41, 0x02, 0xe0, 0x41, 0x05, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x74, - 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x57, 0x0a, 0x12, 0x64, - 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x18, 0x19, 0x20, 0x01, 0x28, 0x09, 0x42, 0x28, 0xe0, 0x41, 0x02, 0xe0, 0x41, 0x05, 0xfa, - 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x52, 0x11, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x12, 0x56, 0x0a, 0x13, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x1b, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x11, 0x64, 0x65, 0x73, 0x74, 0x69, - 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x3b, 0x0a, 0x0b, - 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x0b, 0x64, 0x65, - 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4a, 0x0a, 0x0d, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, - 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x28, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, - 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, - 0x02, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, - 0x2b, 0x0a, 0x11, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 
0x0a, 0x0d, - 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, - 0x6e, 0x12, 0x3c, 0x0a, 0x1a, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, - 0x1c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x18, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, - 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, - 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, - 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, + 0x73, 0x75, 0x6d, 0x73, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x42, + 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, + 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, + 0x1d, 0x0a, 0x1b, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x21, + 0x0a, 0x1f, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x42, 0x21, 0x0a, 0x1f, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, + 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x42, 0x25, 0x0a, 0x23, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xd6, 0x01, 0x0a, 0x0f, + 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x32, 0x0a, 0x15, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x72, + 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x13, + 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, + 0x74, 0x65, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x69, + 0x7a, 0x65, 
0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x53, 0x69, 0x7a, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x77, 0x72, + 0x69, 0x74, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0c, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x35, 0x0a, + 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x22, 0xaf, 0x02, 0x0a, 0x1a, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, + 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x53, 0x0a, 0x11, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, + 0x65, 0x63, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x22, 0x3a, 0x0a, 0x1b, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, + 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, + 0x49, 0x64, 0x22, 0x87, 0x05, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x06, 0x6f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x12, 0x33, 0x0a, 0x13, 
0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, + 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, + 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, - 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, - 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, - 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, - 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0a, 0x20, 0x01, - 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, - 0x01, 0x12, 0x40, 0x0a, 0x1a, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, - 0x0b, 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, 0x52, 0x17, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, - 0x88, 0x01, 0x01, 0x12, 0x47, 0x0a, 0x1e, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x03, 0x48, 0x05, 0x52, 0x1a, 0x69, - 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x48, 0x0a, 0x1e, - 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0d, - 0x20, 0x01, 0x28, 0x03, 0x48, 0x06, 0x52, 0x1b, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, - 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x4f, 0x0a, 0x22, 
0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0e, 0x20, 0x01, - 0x28, 0x03, 0x48, 0x07, 0x52, 0x1e, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, - 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, - 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3e, 0x0a, 0x1c, 0x6d, 0x61, 0x78, 0x5f, 0x62, - 0x79, 0x74, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x70, - 0x65, 0x72, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, 0x52, 0x18, 0x6d, - 0x61, 0x78, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, - 0x50, 0x65, 0x72, 0x43, 0x61, 0x6c, 0x6c, 0x12, 0x47, 0x0a, 0x20, 0x63, 0x6f, 0x70, 0x79, 0x5f, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x10, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x1d, 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, - 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, - 0x12, 0x46, 0x0a, 0x20, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, - 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, - 0x79, 0x74, 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x1c, 0x63, 0x6f, 0x70, 0x79, - 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x53, 0x0a, 0x27, 0x63, 0x6f, 0x70, 0x79, - 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, - 0x74, 0x65, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x22, 0x63, 0x6f, 0x70, 0x79, 0x53, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, - 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x6d, 0x0a, + 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x88, 0x01, 0x01, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, + 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, + 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x40, 0x0a, 0x0b, 0x75, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, + 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x13, 0x20, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 
0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x4d, 0x0a, 0x10, - 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, - 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, - 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, - 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, - 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1d, 0x0a, 0x1b, 0x5f, - 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x21, 0x0a, 0x1f, 0x5f, 0x69, - 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x21, 0x0a, - 0x1f, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, - 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x42, 0x25, 0x0a, 0x23, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, - 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, - 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xd6, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x77, 0x72, - 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x74, - 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, - 0x74, 0x74, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x13, 0x74, 0x6f, 0x74, 0x61, - 0x6c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x12, - 0x1f, 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x69, 0x7a, 0x65, - 0x12, 0x12, 0x0a, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, - 0x64, 0x6f, 0x6e, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, - 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x77, - 0x72, 0x69, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x35, 0x0a, 0x08, 0x72, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 
0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x22, 0xaf, 0x02, 0x0a, 0x1a, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, - 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x53, 0x0a, 0x11, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, - 0x73, 0x70, 0x65, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, - 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x42, 0x03, - 0xe0, 0x41, 0x02, 0x52, 0x0f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x53, 0x70, 0x65, 0x63, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, - 0x72, 0x61, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, - 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, - 0x61, 0x6d, 0x73, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, - 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, - 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, - 0x6d, 0x73, 0x22, 0x3a, 0x0a, 0x1b, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, - 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x22, 0x87, - 0x05, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x33, - 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, - 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, - 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 
0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, - 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, - 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, - 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, - 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, - 0x63, 0x6c, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, - 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, - 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, - 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, - 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, + 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, - 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, - 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0x69, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 
0x74, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, - 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x22, 0x9e, 0x01, 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, - 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, - 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x37, 0x0a, 0x15, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, - 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, - 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x45, - 0x6d, 0x61, 0x69, 0x6c, 0x22, 0x81, 0x01, 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, - 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, - 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x28, - 0x0a, 0x10, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, 0x79, 0x74, - 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, - 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x87, 0x01, 0x0a, 0x14, 0x44, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x20, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, - 0x73, 0x49, 0x64, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, - 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x22, 0x84, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, - 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x65, - 0x73, 0x73, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, - 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x49, 0x64, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, - 
0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, - 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x80, 0x02, 0x0a, 0x13, 0x4c, 0x69, - 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, - 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, - 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x32, 0x0a, 0x15, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, - 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, 0x6c, - 0x12, 0x2a, 0x0a, 0x11, 0x73, 0x68, 0x6f, 0x77, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, - 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x73, 0x68, 0x6f, - 0x77, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x22, 0x7f, 0x0a, 0x14, - 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x09, 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, - 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x68, 0x6d, 0x61, - 0x63, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, - 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, - 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x97, 0x01, - 0x0a, 0x14, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x42, 0x0a, 0x08, 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, - 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, - 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x42, 0x03, 0xe0, 0x41, - 0x02, 0x52, 0x07, 0x68, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x6f, 
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0xbf, 0x01, 0x0a, 0x19, 0x43, 0x6f, 0x6d, 0x6d, - 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, - 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x31, 0x0a, 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x13, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, - 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x30, 0x0a, 0x14, 0x65, 0x6e, 0x63, 0x72, - 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x12, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x3d, 0x0a, 0x1b, 0x65, 0x6e, - 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x68, 0x61, - 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x18, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x53, 0x68, - 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xca, 0x05, 0x0a, 0x10, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x73, 0x22, 0xb5, - 0x05, 0x0a, 0x06, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x12, 0x56, 0x41, 0x4c, - 0x55, 0x45, 0x53, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, - 0x00, 0x12, 0x1b, 0x0a, 0x14, 0x4d, 0x41, 0x58, 0x5f, 0x52, 0x45, 0x41, 0x44, 0x5f, 0x43, 0x48, - 0x55, 0x4e, 0x4b, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x80, 0x80, 0x01, 0x12, 0x1c, - 0x0a, 0x15, 0x4d, 0x41, 0x58, 0x5f, 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, 0x43, 0x48, 0x55, 0x4e, - 0x4b, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x80, 0x80, 0x01, 0x12, 0x19, 0x0a, 0x12, - 0x4d, 0x41, 0x58, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, - 0x4d, 0x42, 0x10, 0x80, 0x80, 0xc0, 0x02, 0x12, 0x29, 0x0a, 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x43, - 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x46, - 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x4e, 0x41, 0x4d, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, - 0x80, 0x08, 0x12, 0x2a, 0x0a, 0x25, 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, - 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, - 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x20, 0x12, 0x29, - 0x0a, 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, 0x45, 0x54, - 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x54, 0x4f, 0x54, 0x41, 0x4c, 0x5f, 0x53, 0x49, 0x5a, 0x45, - 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x40, 0x12, 0x2a, 0x0a, 0x24, 0x4d, 0x41, 0x58, - 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, - 0x5f, 0x54, 0x4f, 0x54, 0x41, 0x4c, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, - 0x53, 0x10, 0x80, 0xa0, 0x01, 0x12, 0x27, 0x0a, 0x23, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, - 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, - 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x42, 0x55, 
0x43, 0x4b, 0x45, 0x54, 0x10, 0x64, 0x12, 0x22, - 0x0a, 0x1e, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x49, 0x46, 0x45, 0x43, 0x59, 0x43, 0x4c, 0x45, 0x5f, - 0x52, 0x55, 0x4c, 0x45, 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, - 0x10, 0x64, 0x12, 0x26, 0x0a, 0x22, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, - 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54, - 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x53, 0x10, 0x05, 0x12, 0x31, 0x0a, 0x2c, 0x4d, 0x41, - 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, - 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x5f, - 0x4b, 0x45, 0x59, 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10, 0x80, 0x02, 0x12, 0x33, 0x0a, - 0x2e, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, - 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, 0x55, - 0x54, 0x45, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10, - 0x80, 0x08, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, - 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x49, 0x45, 0x53, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x10, 0x40, - 0x12, 0x1f, 0x0a, 0x1b, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, 0x5f, 0x4b, - 0x45, 0x59, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10, - 0x3f, 0x12, 0x1f, 0x0a, 0x1a, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, 0x5f, - 0x4b, 0x45, 0x59, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, - 0x80, 0x01, 0x12, 0x2e, 0x0a, 0x29, 0x4d, 0x41, 0x58, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, - 0x5f, 0x49, 0x44, 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x5f, - 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x53, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x10, - 0xe8, 0x07, 0x12, 0x1e, 0x0a, 0x1a, 0x53, 0x50, 0x4c, 0x49, 0x54, 0x5f, 0x54, 0x4f, 0x4b, 0x45, - 0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x44, 0x41, 0x59, 0x53, - 0x10, 0x0e, 0x1a, 0x02, 0x10, 0x01, 0x22, 0xd0, 0x22, 0x0a, 0x06, 0x42, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x03, 0xe0, 0x41, 0x05, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x09, 0x62, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, - 0x41, 0x03, 0x52, 0x08, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, - 0x65, 0x74, 0x61, 0x67, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, - 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x33, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0x69, 0x0a, 0x18, + 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, + 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 
0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x9e, 0x01, 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, - 0x2b, 0x0a, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x6d, 0x65, - 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x08, - 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, - 0xe0, 0x41, 0x05, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, - 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0c, 0x6c, 0x6f, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x10, 0x0a, 0x03, - 0x72, 0x70, 0x6f, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, 0x70, 0x6f, 0x12, 0x38, - 0x0a, 0x03, 0x61, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, + 0x37, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, + 0xe0, 0x41, 0x02, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x22, 0x81, 0x01, 0x0a, 0x15, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, + 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x73, 0x65, + 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x87, 0x01, 0x0a, + 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x61, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x49, 0x64, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 
0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, + 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x84, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x48, 0x6d, + 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, + 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x49, 0x64, 0x12, 0x4d, + 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x80, 0x02, + 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, + 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, + 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, + 0x12, 0x32, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x45, + 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x2a, 0x0a, 0x11, 0x73, 0x68, 0x6f, 0x77, 0x5f, 0x64, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0f, 0x73, 0x68, 0x6f, 0x77, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x73, + 0x22, 0x7f, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x09, 0x68, 0x6d, 0x61, 0x63, + 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, - 0x72, 0x6f, 0x6c, 0x52, 0x03, 0x61, 0x63, 0x6c, 0x12, 0x54, 0x0a, 0x12, 0x64, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, - 0x20, 0x03, 0x28, 0x0b, 0x32, 
0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, - 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x10, 0x64, 0x65, - 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x6c, 0x12, 0x41, - 0x0a, 0x09, 0x6c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, - 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x52, 0x09, 0x6c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, - 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, - 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, - 0x69, 0x6d, 0x65, 0x12, 0x32, 0x0a, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x43, 0x6f, 0x72, - 0x73, 0x52, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x75, - 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x18, 0x64, 0x65, 0x66, - 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, - 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x64, 0x65, 0x66, - 0x61, 0x75, 0x6c, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x48, 0x6f, - 0x6c, 0x64, 0x12, 0x3d, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x0f, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x61, - 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, - 0x73, 0x12, 0x3b, 0x0a, 0x07, 0x77, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x18, 0x10, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x57, 0x65, - 0x62, 0x73, 0x69, 0x74, 0x65, 0x52, 0x07, 0x77, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x12, 0x44, - 0x0a, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x11, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x56, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x3b, 0x0a, 0x07, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x18, - 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, + 0x08, 0x68, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, + 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x22, 0x97, 0x01, 0x0a, 0x14, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, + 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x42, 0x0a, 0x08, 0x68, 0x6d, + 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x07, 0x68, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x3b, + 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, + 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0xbf, 0x01, 0x0a, 0x19, + 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x31, 0x0a, 0x14, 0x65, 0x6e, 0x63, + 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, + 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x30, 0x0a, 0x14, + 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, + 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x12, 0x65, 0x6e, 0x63, 0x72, + 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x3d, + 0x0a, 0x1b, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, + 0x5f, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x18, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, + 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xca, 0x05, + 0x0a, 0x10, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, + 0x74, 0x73, 0x22, 0xb5, 0x05, 0x0a, 0x06, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x16, 0x0a, + 0x12, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x53, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, + 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1b, 0x0a, 0x14, 0x4d, 0x41, 0x58, 0x5f, 0x52, 0x45, 0x41, + 0x44, 0x5f, 0x43, 0x48, 0x55, 0x4e, 0x4b, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x80, + 0x80, 0x01, 0x12, 0x1c, 0x0a, 0x15, 0x4d, 0x41, 0x58, 0x5f, 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, + 0x43, 0x48, 0x55, 0x4e, 0x4b, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x80, 0x80, 0x01, + 0x12, 0x19, 0x0a, 0x12, 0x4d, 0x41, 0x58, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x53, + 0x49, 0x5a, 0x45, 0x5f, 0x4d, 0x42, 0x10, 0x80, 0x80, 0xc0, 0x02, 0x12, 0x29, 0x0a, 0x24, 0x4d, + 0x41, 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, 0x45, 0x54, 
0x41, 0x44, 0x41, + 0x54, 0x41, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x4e, 0x41, 0x4d, 0x45, 0x5f, 0x42, 0x59, + 0x54, 0x45, 0x53, 0x10, 0x80, 0x08, 0x12, 0x2a, 0x0a, 0x25, 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x55, + 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x46, 0x49, + 0x45, 0x4c, 0x44, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, + 0x80, 0x20, 0x12, 0x29, 0x0a, 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, + 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x54, 0x4f, 0x54, 0x41, 0x4c, 0x5f, + 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x40, 0x12, 0x2a, 0x0a, + 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, 0x5f, 0x4d, 0x45, 0x54, 0x41, + 0x44, 0x41, 0x54, 0x41, 0x5f, 0x54, 0x4f, 0x54, 0x41, 0x4c, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, + 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0xa0, 0x01, 0x12, 0x27, 0x0a, 0x23, 0x4d, 0x41, 0x58, + 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x4f, + 0x4e, 0x46, 0x49, 0x47, 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, + 0x10, 0x64, 0x12, 0x22, 0x0a, 0x1e, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x49, 0x46, 0x45, 0x43, 0x59, + 0x43, 0x4c, 0x45, 0x5f, 0x52, 0x55, 0x4c, 0x45, 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x42, 0x55, + 0x43, 0x4b, 0x45, 0x54, 0x10, 0x64, 0x12, 0x26, 0x0a, 0x22, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, + 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, + 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x53, 0x10, 0x05, 0x12, 0x31, + 0x0a, 0x2c, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, + 0x55, 0x54, 0x45, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10, 0x80, + 0x02, 0x12, 0x33, 0x0a, 0x2e, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, + 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54, 0x54, + 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x4c, 0x45, 0x4e, + 0x47, 0x54, 0x48, 0x10, 0x80, 0x08, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, + 0x42, 0x45, 0x4c, 0x53, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x49, 0x45, 0x53, 0x5f, 0x43, 0x4f, 0x55, + 0x4e, 0x54, 0x10, 0x40, 0x12, 0x1f, 0x0a, 0x1b, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45, + 0x4c, 0x53, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x4c, 0x45, 0x4e, + 0x47, 0x54, 0x48, 0x10, 0x3f, 0x12, 0x1f, 0x0a, 0x1a, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42, + 0x45, 0x4c, 0x53, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x42, 0x59, + 0x54, 0x45, 0x53, 0x10, 0x80, 0x01, 0x12, 0x2e, 0x0a, 0x29, 0x4d, 0x41, 0x58, 0x5f, 0x4f, 0x42, + 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x49, 0x44, 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x44, 0x45, 0x4c, + 0x45, 0x54, 0x45, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x53, 0x5f, 0x52, 0x45, 0x51, 0x55, + 0x45, 0x53, 0x54, 0x10, 0xe8, 0x07, 0x12, 0x1e, 0x0a, 0x1a, 0x53, 0x50, 0x4c, 0x49, 0x54, 0x5f, + 0x54, 0x4f, 0x4b, 0x45, 0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, + 0x44, 0x41, 0x59, 0x53, 0x10, 0x0e, 0x1a, 0x02, 0x10, 0x01, 0x22, 0xf5, 0x23, 0x0a, 0x06, 0x42, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 
0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, + 0x0a, 0x09, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x08, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x49, 0x64, + 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x65, 0x74, 0x61, 0x67, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x12, 0x2b, 0x0a, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, + 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x1f, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x28, 0x0a, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0c, 0x6c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, + 0x12, 0x10, 0x0a, 0x03, 0x72, 0x70, 0x6f, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, + 0x70, 0x6f, 0x12, 0x38, 0x0a, 0x03, 0x61, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x03, 0x61, 0x63, 0x6c, 0x12, 0x54, 0x0a, 0x12, + 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, + 0x63, 0x6c, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, + 0x52, 0x10, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, + 0x63, 0x6c, 0x12, 0x41, 0x0a, 0x09, 0x6c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x18, + 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x2e, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, - 0x67, 0x12, 0x33, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, - 0x05, 0x6f, 0x77, 0x6e, 0x65, 
0x72, 0x12, 0x44, 0x0a, 0x0a, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x0a, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x07, - 0x62, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x42, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, - 0x52, 0x07, 0x62, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x54, 0x0a, 0x10, 0x72, 0x65, 0x74, - 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x16, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x52, - 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0f, - 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, - 0x42, 0x0a, 0x0a, 0x69, 0x61, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x17, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x49, - 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x09, 0x69, 0x61, 0x6d, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x61, 0x74, 0x69, 0x73, 0x66, 0x69, 0x65, 0x73, - 0x5f, 0x70, 0x7a, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x73, 0x61, 0x74, 0x69, - 0x73, 0x66, 0x69, 0x65, 0x73, 0x50, 0x7a, 0x73, 0x12, 0x67, 0x0a, 0x17, 0x63, 0x75, 0x73, 0x74, - 0x6f, 0x6d, 0x5f, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x52, 0x09, 0x6c, 0x69, 0x66, 0x65, + 0x63, 0x79, 0x63, 0x6c, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x32, 0x0a, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x18, + 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x2e, 0x43, 0x6f, 0x72, 0x73, 0x52, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x12, 0x40, 0x0a, 0x0b, 0x75, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, + 0x03, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x37, 0x0a, + 0x18, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 
0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, + 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x15, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x73, + 0x65, 0x64, 0x48, 0x6f, 0x6c, 0x64, 0x12, 0x3d, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, + 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, + 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x3b, 0x0a, 0x07, 0x77, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, + 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x2e, 0x57, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x52, 0x07, 0x77, 0x65, 0x62, 0x73, 0x69, + 0x74, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, + 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0a, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x3b, 0x0a, 0x07, 0x6c, 0x6f, 0x67, 0x67, + 0x69, 0x6e, 0x67, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x15, 0x63, 0x75, 0x73, 0x74, - 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x12, 0x41, 0x0a, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x1c, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, - 0x41, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x52, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x63, - 0x6c, 0x61, 0x73, 0x73, 0x12, 0x5d, 0x0a, 0x12, 0x73, 0x6f, 0x66, 0x74, 0x5f, 0x64, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x53, 0x6f, 0x66, 0x74, - 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x03, 0xe0, 0x41, - 0x01, 0x52, 0x10, 0x73, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x1a, 0x30, 0x0a, 0x07, 0x42, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x25, - 0x0a, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x61, 0x79, 0x73, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, - 0x72, 0x50, 0x61, 0x79, 0x73, 0x1a, 0x87, 0x01, 0x0a, 0x04, 0x43, 0x6f, 0x72, 0x73, 0x12, 0x16, - 0x0a, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, - 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 
0x68, 0x6f, 0x64, - 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x27, - 0x0a, 0x0f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, - 0x72, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x26, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x61, - 0x67, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x41, 0x67, 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x1a, - 0x5c, 0x0a, 0x0a, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, - 0x0f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, - 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x0d, - 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x1a, 0xb1, 0x02, - 0x0a, 0x09, 0x49, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x7b, 0x0a, 0x1b, 0x75, - 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6c, 0x65, - 0x76, 0x65, 0x6c, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x49, 0x61, 0x6d, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x55, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x18, - 0x75, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, - 0x65, 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x70, 0x75, 0x62, 0x6c, - 0x69, 0x63, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x76, 0x65, 0x6e, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x16, 0x70, 0x75, 0x62, 0x6c, - 0x69, 0x63, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x50, 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x69, - 0x6f, 0x6e, 0x1a, 0x6d, 0x0a, 0x18, 0x55, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x18, - 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x09, 0x6c, 0x6f, 0x63, 0x6b, - 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x69, 0x6d, - 0x65, 0x1a, 0xdb, 0x07, 0x0a, 0x09, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x12, - 0x3c, 0x0a, 0x04, 0x72, 0x75, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, + 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x6c, 0x6f, + 0x67, 0x67, 0x69, 0x6e, 0x67, 0x12, 0x33, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x13, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 
0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x42, 0x03, + 0xe0, 0x41, 0x03, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x44, 0x0a, 0x0a, 0x65, 0x6e, + 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x3b, 0x0a, 0x07, 0x62, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x15, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x42, 0x69, 0x6c, + 0x6c, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x62, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x54, 0x0a, + 0x10, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x2e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x12, 0x42, 0x0a, 0x0a, 0x69, 0x61, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x2e, 0x49, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x09, 0x69, 0x61, + 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x61, 0x74, 0x69, 0x73, + 0x66, 0x69, 0x65, 0x73, 0x5f, 0x70, 0x7a, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, + 0x73, 0x61, 0x74, 0x69, 0x73, 0x66, 0x69, 0x65, 0x73, 0x50, 0x7a, 0x73, 0x12, 0x67, 0x0a, 0x17, + 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, - 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x04, 0x72, 0x75, 0x6c, 0x65, 0x1a, 0x8f, 0x07, - 0x0a, 0x04, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x47, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, 0x65, - 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x50, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, - 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x2e, 0x43, 0x6f, 0x6e, - 0x64, 0x69, 0x74, 0x69, 0x6f, 
0x6e, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x1a, 0x41, 0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, - 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, - 0x6c, 0x61, 0x73, 0x73, 0x1a, 0xa8, 0x05, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x08, 0x61, 0x67, 0x65, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x07, 0x61, 0x67, 0x65, 0x44, 0x61, 0x79, 0x73, 0x88, - 0x01, 0x01, 0x12, 0x38, 0x0a, 0x0e, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x65, - 0x66, 0x6f, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x65, 0x52, 0x0d, 0x63, - 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x1c, 0x0a, 0x07, - 0x69, 0x73, 0x5f, 0x6c, 0x69, 0x76, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x48, 0x01, 0x52, - 0x06, 0x69, 0x73, 0x4c, 0x69, 0x76, 0x65, 0x88, 0x01, 0x01, 0x12, 0x31, 0x0a, 0x12, 0x6e, 0x75, - 0x6d, 0x5f, 0x6e, 0x65, 0x77, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x48, 0x02, 0x52, 0x10, 0x6e, 0x75, 0x6d, 0x4e, 0x65, 0x77, - 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x88, 0x01, 0x01, 0x12, 0x32, 0x0a, - 0x15, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x65, 0x73, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, - 0x73, 0x12, 0x38, 0x0a, 0x16, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, - 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x05, 0x48, 0x03, 0x52, 0x13, 0x64, 0x61, 0x79, 0x73, 0x53, 0x69, 0x6e, 0x63, 0x65, 0x43, 0x75, - 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, 0x3f, 0x0a, 0x12, 0x63, - 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x62, 0x65, 0x66, 0x6f, 0x72, - 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x65, 0x52, 0x10, 0x63, 0x75, 0x73, 0x74, - 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x40, 0x0a, 0x1a, - 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x75, - 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, - 0x48, 0x04, 0x52, 0x17, 0x64, 0x61, 0x79, 0x73, 0x53, 0x69, 0x6e, 0x63, 0x65, 0x4e, 0x6f, 0x6e, - 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, 0x47, - 0x0a, 0x16, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, - 0x65, 0x5f, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, 0x74, - 0x65, 0x52, 0x14, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, - 0x65, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x25, 
0x0a, 0x0e, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x65, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x25, - 0x0a, 0x0e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, - 0x18, 0x0c, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x53, - 0x75, 0x66, 0x66, 0x69, 0x78, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x61, 0x67, 0x65, 0x5f, 0x64, 0x61, - 0x79, 0x73, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x69, 0x73, 0x5f, 0x6c, 0x69, 0x76, 0x65, 0x42, 0x15, - 0x0a, 0x13, 0x5f, 0x6e, 0x75, 0x6d, 0x5f, 0x6e, 0x65, 0x77, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x19, 0x0a, 0x17, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, - 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, - 0x42, 0x1d, 0x0a, 0x1b, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, - 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x1a, - 0x54, 0x0a, 0x07, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x6f, - 0x67, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, - 0x6c, 0x6f, 0x67, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x2a, 0x0a, 0x11, 0x6c, 0x6f, 0x67, - 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x6f, 0x67, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, - 0x72, 0x65, 0x66, 0x69, 0x78, 0x1a, 0xbb, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, - 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x41, 0x0a, 0x0e, 0x65, 0x66, 0x66, - 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d, 0x65, - 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, - 0x69, 0x73, 0x5f, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x08, 0x69, 0x73, 0x4c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x48, 0x0a, 0x12, 0x72, 0x65, 0x74, + 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, + 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x15, + 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x41, 0x0a, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, + 0x73, 0x73, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x52, 0x09, 0x61, + 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x6b, 0x0a, 0x16, 0x68, 0x69, 0x65, 0x72, + 0x61, 0x72, 0x63, 0x68, 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x18, 0x20, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x2e, 0x48, 0x69, 0x65, 0x72, 0x61, 0x72, 0x63, 0x68, 0x69, 
0x63, 0x61, 0x6c, + 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x15, + 0x68, 0x69, 0x65, 0x72, 0x61, 0x72, 0x63, 0x68, 0x69, 0x63, 0x61, 0x6c, 0x4e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x5d, 0x0a, 0x12, 0x73, 0x6f, 0x66, 0x74, 0x5f, 0x64, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x1f, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x53, 0x6f, 0x66, + 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x03, 0xe0, + 0x41, 0x01, 0x52, 0x10, 0x73, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x30, 0x0a, 0x07, 0x42, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x12, + 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x61, 0x79, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x65, 0x72, 0x50, 0x61, 0x79, 0x73, 0x1a, 0x87, 0x01, 0x0a, 0x04, 0x43, 0x6f, 0x72, 0x73, 0x12, + 0x16, 0x0a, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, + 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, + 0x27, 0x0a, 0x0f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x26, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, + 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x41, 0x67, 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, + 0x1a, 0x5c, 0x0a, 0x0a, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, + 0x0a, 0x0f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, + 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x1a, 0xb1, + 0x02, 0x0a, 0x09, 0x49, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x7b, 0x0a, 0x1b, + 0x75, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6c, + 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x49, 0x61, 0x6d, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x55, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, + 0x18, 0x75, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x65, + 0x76, 0x65, 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x70, 0x75, 0x62, + 0x6c, 0x69, 0x63, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x76, 0x65, + 0x6e, 
0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x16, 0x70, 0x75, 0x62, + 0x6c, 0x69, 0x63, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x50, 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74, + 0x69, 0x6f, 0x6e, 0x1a, 0x6d, 0x0a, 0x18, 0x55, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, + 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x09, 0x6c, 0x6f, 0x63, + 0x6b, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x69, + 0x6d, 0x65, 0x1a, 0xdb, 0x07, 0x0a, 0x09, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, + 0x12, 0x3c, 0x0a, 0x04, 0x72, 0x75, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, + 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x04, 0x72, 0x75, 0x6c, 0x65, 0x1a, 0x8f, + 0x07, 0x0a, 0x04, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x47, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, + 0x65, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x50, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, + 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x2e, 0x43, 0x6f, + 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x1a, 0x41, 0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, + 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, + 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x43, 0x6c, 0x61, 0x73, 0x73, 0x1a, 0xa8, 0x05, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x08, 0x61, 0x67, 0x65, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x07, 0x61, 0x67, 0x65, 0x44, 0x61, 0x79, 0x73, + 0x88, 0x01, 0x01, 0x12, 0x38, 0x0a, 0x0e, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x62, + 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x65, 0x52, 0x0d, + 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x1c, 0x0a, + 0x07, 0x69, 0x73, 0x5f, 0x6c, 0x69, 0x76, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x48, 0x01, + 0x52, 0x06, 0x69, 0x73, 0x4c, 
0x69, 0x76, 0x65, 0x88, 0x01, 0x01, 0x12, 0x31, 0x0a, 0x12, 0x6e, + 0x75, 0x6d, 0x5f, 0x6e, 0x65, 0x77, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x48, 0x02, 0x52, 0x10, 0x6e, 0x75, 0x6d, 0x4e, 0x65, + 0x77, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x88, 0x01, 0x01, 0x12, 0x32, + 0x0a, 0x15, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, + 0x73, 0x73, 0x12, 0x38, 0x0a, 0x16, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, + 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x05, 0x48, 0x03, 0x52, 0x13, 0x64, 0x61, 0x79, 0x73, 0x53, 0x69, 0x6e, 0x63, 0x65, 0x43, + 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, 0x3f, 0x0a, 0x12, + 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x62, 0x65, 0x66, 0x6f, + 0x72, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x65, 0x52, 0x10, 0x63, 0x75, 0x73, + 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x40, 0x0a, + 0x1a, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x6f, 0x6e, 0x63, + 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, + 0x05, 0x48, 0x04, 0x52, 0x17, 0x64, 0x61, 0x79, 0x73, 0x53, 0x69, 0x6e, 0x63, 0x65, 0x4e, 0x6f, + 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, + 0x47, 0x0a, 0x16, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x5f, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, + 0x74, 0x65, 0x52, 0x14, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x69, + 0x6d, 0x65, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, + 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, + 0x78, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, + 0x53, 0x75, 0x66, 0x66, 0x69, 0x78, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x61, 0x67, 0x65, 0x5f, 0x64, + 0x61, 0x79, 0x73, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x69, 0x73, 0x5f, 0x6c, 0x69, 0x76, 0x65, 0x42, + 0x15, 0x0a, 0x13, 0x5f, 0x6e, 0x75, 0x6d, 0x5f, 0x6e, 0x65, 0x77, 0x65, 0x72, 0x5f, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x19, 0x0a, 0x17, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x5f, + 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, + 0x65, 0x42, 0x1d, 0x0a, 0x1b, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, + 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x1a, 0x54, 0x0a, 0x07, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, + 0x6f, 0x67, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x6c, 0x6f, 0x67, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x2a, 0x0a, 0x11, 0x6c, 0x6f, + 0x67, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x6f, 0x67, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x1a, 0xbb, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x74, 0x65, 0x6e, + 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x41, 0x0a, 0x0e, 0x65, 0x66, + 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d, + 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1b, 0x0a, + 0x09, 0x69, 0x73, 0x5f, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x08, 0x69, 0x73, 0x4c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x48, 0x0a, 0x12, 0x72, 0x65, + 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x11, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xd3, 0x01, 0x0a, 0x10, 0x53, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x4d, 0x0a, 0x12, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x11, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x1a, 0xd3, 0x01, 0x0a, 0x10, 0x53, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x4d, 0x0a, 0x12, 0x72, 0x65, 0x74, 0x65, - 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, - 0x00, 0x52, 0x11, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x46, 0x0a, 0x0e, 0x65, 0x66, 0x66, 0x65, 0x63, - 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x48, 0x01, 0x52, 0x0d, 0x65, - 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x42, - 0x15, 0x0a, 0x13, 0x5f, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x11, 0x0a, 0x0f, 0x5f, 0x65, 0x66, 0x66, 0x65, 0x63, - 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x1a, 0x26, 0x0a, 0x0a, 0x56, 
0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, - 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, - 0x64, 0x1a, 0x59, 0x0a, 0x07, 0x57, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x12, 0x28, 0x0a, 0x10, - 0x6d, 0x61, 0x69, 0x6e, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6d, 0x61, 0x69, 0x6e, 0x50, 0x61, 0x67, 0x65, - 0x53, 0x75, 0x66, 0x66, 0x69, 0x78, 0x12, 0x24, 0x0a, 0x0e, 0x6e, 0x6f, 0x74, 0x5f, 0x66, 0x6f, - 0x75, 0x6e, 0x64, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, - 0x6e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x50, 0x61, 0x67, 0x65, 0x1a, 0x3e, 0x0a, 0x15, - 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x25, 0x0a, 0x0e, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6c, 0x6f, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x64, - 0x61, 0x74, 0x61, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xd6, 0x02, 0x0a, - 0x09, 0x41, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, - 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, - 0x62, 0x6c, 0x65, 0x64, 0x12, 0x40, 0x0a, 0x0b, 0x74, 0x6f, 0x67, 0x67, 0x6c, 0x65, 0x5f, 0x74, - 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x74, 0x6f, 0x67, 0x67, - 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x16, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, - 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x14, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, - 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x88, 0x01, - 0x01, 0x12, 0x70, 0x0a, 0x22, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x75, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x48, 0x01, - 0x52, 0x1e, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, - 0x88, 0x01, 0x01, 0x42, 0x19, 0x0a, 0x17, 0x5f, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, - 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x42, 0x25, - 0x0a, 0x23, 0x5f, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 
0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, - 0x3a, 0x47, 0xea, 0x41, 0x44, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x12, 0x23, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, - 0x2f, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x7d, 0x22, 0x97, 0x02, 0x0a, 0x13, 0x42, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, - 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x22, 0x0a, - 0x0a, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x61, 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x41, 0x6c, - 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, 0x12, - 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, - 0x61, 0x67, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, - 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, - 0x12, 0x41, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x65, 0x61, 0x6d, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, - 0x65, 0x61, 0x6d, 0x22, 0x5a, 0x0a, 0x0f, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, - 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x1f, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, - 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x05, 0xe0, 0x41, 0x01, 0x08, 0x01, 0x52, 0x07, - 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, - 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x07, 0x48, 0x00, 0x52, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, - 0x63, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, - 0x54, 0x0a, 0x0f, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, - 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x07, 0x48, 0x00, 0x52, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x88, 0x01, 0x01, 0x12, - 0x19, 0x0a, 0x08, 0x6d, 0x64, 0x35, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x07, 0x6d, 0x64, 0x35, 0x48, 0x61, 0x73, 0x68, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x63, - 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0xfe, 0x02, 0x0a, 0x0f, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, - 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x13, 0x0a, 0x02, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 
0x03, 0xe0, 0x41, 0x05, 0x52, 0x02, 0x69, 0x64, 0x12, 0x20, - 0x0a, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x49, 0x64, - 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x33, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, - 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, - 0x37, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, - 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, - 0xe0, 0x41, 0x03, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, - 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, - 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x40, - 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, - 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, - 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, - 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x22, 0x85, 0x04, 0x0a, 0x12, 0x4e, 0x6f, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x17, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, - 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, - 0x63, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, - 0x79, 0x70, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x65, 0x76, 0x65, 0x6e, - 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x68, 0x0a, 0x11, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, - 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x3b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x41, - 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x10, - 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x41, 0x74, 0x74, 0x72, 
0x69, 0x62, 0x75, 0x74, 0x65, 0x73, - 0x12, 0x2c, 0x0a, 0x12, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x5f, - 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x6f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x2a, - 0x0a, 0x0e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0d, 0x70, 0x61, 0x79, - 0x6c, 0x6f, 0x61, 0x64, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x1a, 0x43, 0x0a, 0x15, 0x43, 0x75, - 0x73, 0x74, 0x6f, 0x6d, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, - 0x7d, 0xea, 0x41, 0x7a, 0x0a, 0x29, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, - 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, - 0x4d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x7d, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x7b, 0x62, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x7d, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x2f, 0x7b, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x7d, 0x22, 0x71, - 0x0a, 0x12, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x31, 0x0a, 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x13, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, - 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x28, 0x0a, 0x10, 0x6b, 0x65, 0x79, 0x5f, 0x73, - 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x0e, 0x6b, 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65, - 0x73, 0x22, 0xec, 0x0b, 0x0a, 0x06, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x17, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, + 0x48, 0x00, 0x52, 0x11, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x46, 0x0a, 0x0e, 0x65, 0x66, 0x66, 0x65, + 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x48, 0x01, 0x52, 0x0d, + 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, + 0x42, 0x15, 0x0a, 0x13, 0x5f, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 
0x5f, 0x64, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x11, 0x0a, 0x0f, 0x5f, 0x65, 0x66, 0x66, 0x65, + 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x1a, 0x26, 0x0a, 0x0a, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, + 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, + 0x65, 0x64, 0x1a, 0x59, 0x0a, 0x07, 0x57, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x12, 0x28, 0x0a, + 0x10, 0x6d, 0x61, 0x69, 0x6e, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, + 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6d, 0x61, 0x69, 0x6e, 0x50, 0x61, 0x67, + 0x65, 0x53, 0x75, 0x66, 0x66, 0x69, 0x78, 0x12, 0x24, 0x0a, 0x0e, 0x6e, 0x6f, 0x74, 0x5f, 0x66, + 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0c, 0x6e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x50, 0x61, 0x67, 0x65, 0x1a, 0x3e, 0x0a, + 0x15, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x25, 0x0a, 0x0e, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, + 0x64, 0x61, 0x74, 0x61, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xd6, 0x02, + 0x0a, 0x09, 0x41, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, + 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, + 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x40, 0x0a, 0x0b, 0x74, 0x6f, 0x67, 0x67, 0x6c, 0x65, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x74, 0x6f, 0x67, + 0x67, 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x16, 0x74, 0x65, 0x72, 0x6d, 0x69, + 0x6e, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, + 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x14, 0x74, 0x65, 0x72, 0x6d, 0x69, + 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x88, + 0x01, 0x01, 0x12, 0x70, 0x0a, 0x22, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x75, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x48, + 0x01, 0x52, 0x1e, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, + 0x65, 0x88, 0x01, 0x01, 0x42, 0x19, 0x0a, 0x17, 0x5f, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, + 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x42, + 0x25, 0x0a, 0x23, 0x5f, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x1a, 0x36, 0x0a, 0x15, 0x48, 0x69, 0x65, 0x72, 0x61, 0x72, + 0x63, 0x68, 
0x69, 0x63, 0x61, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, + 0x1d, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, + 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x1a, 0x39, + 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x47, 0xea, 0x41, 0x44, 0x0a, 0x1d, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x23, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x7d, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x7d, 0x22, 0x97, 0x02, 0x0a, 0x13, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, + 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x0e, + 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, + 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x22, 0x0a, 0x0a, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, + 0x5f, 0x61, 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, + 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x41, 0x6c, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, + 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x14, 0x0a, 0x05, 0x65, + 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, + 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x41, 0x0a, 0x0c, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x52, + 0x0b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x22, 0x5a, 0x0a, 0x0f, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, + 0x1f, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, + 0x42, 0x05, 0xe0, 0x41, 0x01, 0x08, 0x01, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, + 0x12, 0x1b, 0x0a, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x07, + 0x48, 0x00, 0x52, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, + 0x07, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0x54, 0x0a, 0x0f, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x06, 0x63, + 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 
0x01, 0x20, 0x01, 0x28, 0x07, 0x48, 0x00, 0x52, 0x06, 0x63, + 0x72, 0x63, 0x33, 0x32, 0x63, 0x88, 0x01, 0x01, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x64, 0x35, 0x5f, + 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6d, 0x64, 0x35, 0x48, + 0x61, 0x73, 0x68, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0xfe, + 0x02, 0x0a, 0x0f, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x12, 0x13, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, + 0xe0, 0x41, 0x05, 0x52, 0x02, 0x69, 0x64, 0x12, 0x20, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, + 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x49, 0x64, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x05, 0xfa, + 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, + 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x37, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, + 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x13, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, + 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, + 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x65, + 0x74, 0x61, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x22, + 0x85, 0x04, 0x0a, 0x12, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x19, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, + 0xe0, 0x41, 0x02, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, + 0x61, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x1f, + 0x0a, 0x0b, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x0a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, + 0x68, 0x0a, 0x11, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 
0x61, 0x74, 0x74, 0x72, 0x69, 0x62, + 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, + 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, + 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x10, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x41, + 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x61, 0x6d, + 0x65, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x2a, 0x0a, 0x0e, 0x70, 0x61, 0x79, 0x6c, 0x6f, + 0x61, 0x64, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0d, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x46, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x1a, 0x43, 0x0a, 0x15, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x41, 0x74, 0x74, + 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x7d, 0xea, 0x41, 0x7a, 0x0a, 0x29, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, - 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x1b, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x23, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, - 0x05, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, - 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x61, - 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, - 0x17, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, - 0x41, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74, - 0x65, 0x6e, 0x74, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x07, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x63, 0x6f, 0x64, - 0x69, 0x6e, 0x67, 0x12, 0x2f, 0x0a, 0x13, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x64, - 0x69, 0x73, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x12, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x44, 0x69, 0x73, 0x70, 0x6f, 0x73, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x63, 0x6f, - 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 
0x61, 0x63, - 0x68, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x38, 0x0a, 0x03, 0x61, 0x63, 0x6c, - 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x03, - 0x61, 0x63, 0x6c, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x6c, - 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, - 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x12, 0x40, - 0x0a, 0x0b, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0c, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, - 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, - 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, - 0x6d, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x7d, 0x2f, 0x6e, 0x6f, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x73, 0x2f, 0x7b, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x7d, 0x22, 0x71, 0x0a, 0x12, 0x43, 0x75, 0x73, 0x74, 0x6f, + 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x31, 0x0a, + 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, + 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x65, 0x6e, 0x63, + 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, + 0x12, 0x28, 0x0a, 0x10, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62, + 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x6b, 0x65, 0x79, 0x53, + 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xb6, 0x0d, 0x0a, 0x06, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3d, + 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, + 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x12, 0x0a, + 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, + 0x67, 0x12, 
0x23, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, + 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, + 0x6c, 0x61, 0x73, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x17, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, + 0x65, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x65, 0x6e, 0x63, + 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6f, 0x6e, + 0x74, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x2f, 0x0a, 0x13, + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x64, 0x69, 0x73, 0x70, 0x6f, 0x73, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x63, 0x6f, 0x6e, 0x74, 0x65, + 0x6e, 0x74, 0x44, 0x69, 0x73, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, + 0x0d, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x18, 0x09, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x61, 0x63, 0x68, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x12, 0x38, 0x0a, 0x03, 0x61, 0x63, 0x6c, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x03, 0x61, 0x63, 0x6c, 0x12, 0x29, 0x0a, 0x10, + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, + 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4c, + 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x64, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x64, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, + 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x40, 0x0a, 0x0b, + 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, + 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x2c, + 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x63, 0x6f, + 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 
0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x45, 0x0a, 0x09, + 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, + 0x75, 0x6d, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, + 0x75, 0x6d, 0x73, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x2c, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, - 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, - 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x43, 0x6f, - 0x75, 0x6e, 0x74, 0x12, 0x45, 0x0a, 0x09, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, - 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, - 0x09, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, - 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x07, - 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x12, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, - 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, - 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x5a, 0x0a, - 0x19, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, - 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, - 0x03, 0x52, 0x16, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x43, 0x6c, 0x61, 0x73, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x14, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x0d, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x48, 0x6f, 0x6c, 0x64, - 0x12, 0x4e, 0x0a, 0x15, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, - 0x70, 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x07, 0x6b, 
0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, + 0x18, 0x12, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, + 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x5a, 0x0a, 0x19, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x74, + 0x69, 0x6d, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x16, 0x75, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x54, 0x69, + 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x5f, + 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x74, 0x65, 0x6d, 0x70, + 0x6f, 0x72, 0x61, 0x72, 0x79, 0x48, 0x6f, 0x6c, 0x64, 0x12, 0x4e, 0x0a, 0x15, 0x72, 0x65, 0x74, + 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x52, 0x13, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x45, + 0x78, 0x70, 0x69, 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x43, 0x0a, 0x08, 0x6d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x16, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x2d, + 0x0a, 0x10, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, + 0x6c, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0e, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x48, 0x6f, 0x6c, 0x64, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a, + 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x6f, 0x77, 0x6e, + 0x65, 0x72, 0x12, 0x56, 0x0a, 0x13, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x5f, 0x65, + 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, + 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, + 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x0b, 0x63, 0x75, + 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x13, 0x72, 
0x65, 0x74, - 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65, - 0x12, 0x43, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x16, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x4d, 0x65, - 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, - 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x2d, 0x0a, 0x10, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, - 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x48, - 0x00, 0x52, 0x0e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x48, 0x6f, 0x6c, - 0x64, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x18, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x42, 0x03, 0xe0, - 0x41, 0x03, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x56, 0x0a, 0x13, 0x63, 0x75, 0x73, - 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, - 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x63, - 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x3b, 0x0a, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, - 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x52, 0x0a, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x1a, 0x3b, - 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, - 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x13, 0x0a, 0x11, 0x5f, - 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, 0x64, - 0x22, 0x97, 0x02, 0x0a, 0x13, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, - 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6c, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x0e, 0x0a, 0x02, - 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, - 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, - 0x74, 0x69, 0x74, 0x79, 0x12, 0x22, 0x0a, 0x0a, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x61, - 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x65, - 0x6e, 0x74, 0x69, 0x74, 0x79, 0x41, 0x6c, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, - 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, - 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x08, 0x20, - 0x01, 0x28, 
0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, - 0x69, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, - 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x41, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x52, 0x0b, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x22, 0x8e, 0x01, 0x0a, 0x13, 0x4c, - 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, - 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x65, 0x66, 0x69, - 0x78, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x65, 0x66, 0x69, - 0x78, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, - 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, - 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x48, 0x0a, 0x0b, 0x50, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x75, 0x6d, 0x62, 0x65, - 0x72, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x74, 0x65, 0x61, 0x6d, 0x22, 0x35, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x6d, 0x61, 0x69, 0x6c, - 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, - 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x3c, 0x0a, 0x05, - 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1b, 0x0a, - 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x22, 0x5f, 0x0a, 0x0c, 0x43, 0x6f, - 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, - 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, - 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x65, - 0x6e, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x6c, - 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x63, 0x6f, 0x6d, - 0x70, 0x6c, 0x65, 0x74, 0x65, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x32, 0x98, 0x28, 0x0a, 0x07, - 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x72, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, - 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 
0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x22, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x15, 0x12, 0x13, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0b, - 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x6f, 0x0a, 0x09, 0x47, - 0x65, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, - 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x22, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x15, 0x12, 0x13, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xab, 0x01, 0x0a, - 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, + 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x63, 0x75, 0x73, + 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x4e, 0x0a, 0x10, 0x73, 0x6f, 0x66, 0x74, 0x5f, + 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x1c, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, + 0x41, 0x03, 0x48, 0x01, 0x52, 0x0e, 0x73, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, 0x4e, 0x0a, 0x10, 0x68, 0x61, 0x72, 0x64, 0x5f, + 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x1d, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, + 0x41, 0x03, 0x48, 0x02, 0x52, 0x0e, 0x68, 0x61, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, + 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x73, 0x6f, + 0x66, 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x42, 0x13, + 0x0a, 0x11, 0x5f, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, + 0x69, 0x6d, 0x65, 0x22, 0x97, 0x02, 0x0a, 0x13, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x72, + 0x6f, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 
0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, + 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, + 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x22, 0x0a, 0x0a, 0x65, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x5f, 0x61, 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, + 0x52, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x41, 0x6c, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x65, + 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x14, 0x0a, 0x05, + 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, + 0x69, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x41, 0x0a, 0x0c, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, + 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x22, 0x8e, 0x01, + 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x52, 0x07, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, + 0x65, 0x66, 0x69, 0x78, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, + 0x65, 0x66, 0x69, 0x78, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, + 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x48, + 0x0a, 0x0b, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x12, 0x25, 0x0a, + 0x0e, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x74, 0x65, 0x61, 0x6d, 0x22, 0x35, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x6d, + 0x61, 0x69, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0c, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, + 0x3c, 0x0a, 0x05, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, + 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x22, 
0x5f, 0x0a, + 0x0c, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, + 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, + 0x65, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, + 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x32, 0xaa, + 0x27, 0x0a, 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x72, 0x0a, 0x0c, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x22, 0xda, 0x41, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x15, 0x12, 0x13, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x6f, + 0x0a, 0x09, 0x47, 0x65, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x47, 0x65, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x22, 0xda, 0x41, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x15, 0x12, 0x13, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, + 0xab, 0x01, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x22, 0x58, 0xda, 0x41, 0x17, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x8a, + 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, + 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x1e, 0x0a, + 0x0e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, + 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x85, 0x01, + 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 
0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x22, 0x58, 0xda, 0x41, 0x17, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x62, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x2c, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x8a, 0xd3, 0xe4, 0x93, - 0x02, 0x38, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0c, 0x7b, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x1e, 0x0a, 0x0e, 0x62, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x0c, 0x7b, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x85, 0x01, 0x0a, 0x0b, 0x4c, - 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, - 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x27, 0xda, 0x41, 0x06, 0x70, 0x61, - 0x72, 0x65, 0x6e, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x18, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, - 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, - 0x2a, 0x7d, 0x12, 0x93, 0x01, 0x0a, 0x19, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x12, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, - 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x22, 0x26, 0xda, 0x41, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, - 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xab, 0x01, 0x0a, 0x0c, 0x47, 0x65, 0x74, - 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x22, 0x60, 0xda, 0x41, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x27, 0xda, 0x41, + 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x18, 0x12, 0x16, 0x0a, + 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 
0x74, 0x12, 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x93, 0x01, 0x0a, 0x19, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x12, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x22, 0x26, 0xda, 0x41, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x8a, 0xd3, + 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, + 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x75, 0x0a, 0x0c, 0x47, + 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, + 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x2a, 0xda, 0x41, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x19, 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, + 0x2a, 0x7d, 0x12, 0x7c, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, + 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x31, 0xda, + 0x41, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x19, 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, - 0x12, 0x34, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x28, 0x7b, 0x62, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, - 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2a, 0x12, 0xb2, 0x01, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x49, 0x61, - 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x22, 0x67, 0xda, 0x41, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, - 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x12, 0x17, 0x0a, 0x08, - 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 
0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x34, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x12, 0x28, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, - 0x2f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2a, 0x12, 0xd7, 0x01, 0x0a, 0x12, - 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, - 0x6e, 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, - 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, - 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x6c, 0xda, 0x41, 0x14, 0x72, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, - 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, - 0x34, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x28, 0x7b, 0x62, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, - 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x73, 0x2f, 0x2a, 0x2a, 0x12, 0x8a, 0x01, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x37, 0xda, 0x41, 0x12, 0x62, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, - 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x1c, 0x12, 0x1a, 0x0a, 0x0b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, - 0x2a, 0x7d, 0x12, 0x9f, 0x01, 0x0a, 0x18, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, - 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, - 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x37, 0xda, 0x41, 0x04, + 0x12, 0xd7, 0x01, 0x0a, 0x12, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, + 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 
0x6d, 0x50, + 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, + 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x6c, 0xda, 0x41, + 0x14, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x12, 0x17, 0x0a, 0x08, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x34, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x12, 0x28, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2a, 0x12, 0x8a, 0x01, 0x0a, 0x0c, 0x55, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x37, + 0xda, 0x41, 0x12, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x1c, 0x12, 0x1a, 0x0a, 0x0b, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x9f, 0x01, 0x0a, 0x18, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x12, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, + 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, + 0x22, 0x37, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x12, + 0x28, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0x12, 0xa8, 0x01, 0x0a, 0x15, 0x47, 0x65, + 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 
0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x37, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x12, 0x28, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, - 0x7d, 0x2f, 0x2a, 0x2a, 0x12, 0xa8, 0x01, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2f, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x37, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x8a, - 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x12, 0x28, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x7b, - 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, - 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0x12, - 0xb1, 0x01, 0x0a, 0x18, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x32, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x3a, 0xda, 0x41, 0x1a, 0x70, 0x61, 0x72, 0x65, - 0x6e, 0x74, 0x2c, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, + 0x7d, 0x2f, 0x2a, 0x2a, 0x12, 0xb1, 0x01, 0x0a, 0x18, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, + 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x3a, 0xda, 0x41, + 0x1a, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x8a, 0xd3, 0xe4, 0x93, 0x02, + 0x17, 0x12, 0x15, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 
0x7d, 0x12, 0xa8, 0x01, 0x0a, 0x17, 0x4c, 0x69, 0x73, + 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x73, 0x12, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, + 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0xda, 0x41, 0x06, + 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, - 0x2a, 0x2a, 0x7d, 0x12, 0xa8, 0x01, 0x0a, 0x17, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, - 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, - 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, - 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x7e, - 0x0a, 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, - 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x22, 0x29, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x12, 0x21, 0x0a, 0x12, 0x64, - 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x98, - 0x01, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, - 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, - 0x48, 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 
0x6f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x8a, 0xd3, 0xe4, 0x93, - 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x8d, 0x01, 0x0a, 0x0d, 0x52, 0x65, - 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, - 0x38, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x8a, 0xd3, 0xe4, 0x93, - 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xba, 0x01, 0x0a, 0x14, 0x43, 0x61, - 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, - 0x74, 0x65, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, - 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, - 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x41, 0xda, 0x41, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, - 0x64, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, - 0x64, 0x5f, 0x69, 0x64, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, - 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0x12, 0x95, 0x01, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x4f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x22, 0x48, 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, + 0x2a, 0x2a, 0x7d, 0x12, 0x7e, 0x0a, 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x29, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x23, + 0x12, 0x21, 0x0a, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 
0x2e, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, + 0x2a, 0x2a, 0x7d, 0x12, 0x98, 0x01, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x22, 0x48, 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xa5, - 0x01, 0x0a, 0x0a, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x24, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x48, 0xda, 0x41, 0x0d, 0x62, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, - 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x3d, 0x2a, 0x2a, 0x7d, 0x30, 0x01, 0x12, 0x8c, 0x01, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x39, 0xda, 0x41, 0x12, 0x6f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, - 0x6b, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x12, 0x1c, 0x0a, 0x0d, 0x6f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x60, 0x0a, 0x0b, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 
0x00, 0x28, 0x01, 0x12, 0x6e, 0x0a, 0x0f, 0x42, 0x69, 0x64, 0x69, 0x57, - 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x29, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, - 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x69, 0x64, 0x69, 0x57, 0x72, - 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x84, 0x01, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, - 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, - 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, - 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, - 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x98, - 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x8d, + 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, - 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3a, 0x8a, - 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, 0x0f, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, - 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x21, 0x0a, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xae, 0x01, 0x0a, 0x13, 0x53, 0x74, - 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, - 0x65, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, - 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, - 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x38, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x32, 0x12, 0x30, 0x0a, 0x21, 0x77, 0x72, 0x69, 0x74, - 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 
0x5f, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x72, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, - 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xae, 0x01, 0x0a, 0x10, 0x51, - 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, - 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x22, 0x38, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xba, + 0x01, 0x0a, 0x14, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, + 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6e, 0x63, + 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6e, 0x63, + 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x41, 0xda, 0x41, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0x12, 0x80, 0x01, 0x0a, 0x11, - 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, - 0x74, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, + 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0x12, 0x95, 0x01, 0x0a, 0x09, + 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, + 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 
0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, - 0x74, 0x22, 0x1b, 0xda, 0x41, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x8a, 0xd3, 0xe4, - 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x95, - 0x01, 0x0a, 0x0d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, - 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, - 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, - 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x31, 0xda, 0x41, 0x1d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2c, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, - 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x77, 0x0a, 0x0d, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x25, 0xda, 0x41, 0x11, 0x61, 0x63, 0x63, - 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x2c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x8a, 0xd3, - 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, - 0x7d, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x24, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, - 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x25, 0xda, 0x41, 0x11, 0x61, 0x63, 0x63, 0x65, - 0x73, 0x73, 0x5f, 0x69, 0x64, 0x2c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x8a, 0xd3, 0xe4, - 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x7c, - 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x26, + 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x48, 0xda, 0x41, 0x0d, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, + 0x2a, 0x2a, 0x7d, 0x12, 0xa5, 0x01, 0x0a, 0x0a, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x12, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 
0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, + 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x48, 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x8a, 0xd3, 0xe4, 0x93, + 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x30, 0x01, 0x12, 0x8c, 0x01, 0x0a, 0x0c, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x26, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, + 0x39, 0xda, 0x41, 0x12, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x12, 0x1c, 0x0a, 0x0d, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x60, 0x0a, 0x0b, 0x57, 0x72, + 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, + 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x12, 0x6e, 0x0a, 0x0f, + 0x42, 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, + 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, + 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x84, 0x01, 0x0a, + 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x25, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0xda, 0x41, 0x06, + 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, + 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, + 0x2a, 0x2a, 0x7d, 0x12, 0x98, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, + 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x48, - 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x1b, 0xda, 0x41, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, - 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x9d, 0x01, 0x0a, - 0x0d, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, + 0x76, 0x32, 0x2e, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x3a, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, 0x0f, 0x0a, 0x0d, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x21, 0x0a, 0x12, 0x64, + 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xae, + 0x01, 0x0a, 0x13, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, + 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, + 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, + 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x38, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x32, 0x12, 0x30, 0x0a, + 0x21, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, + 0x65, 0x63, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, + 0xae, 0x01, 0x0a, 0x10, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, + 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 
0x57, 0x72, 0x69, 0x74, 0x65, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x41, 0xda, + 0x41, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x8a, 0xd3, 0xe4, 0x93, 0x02, + 0x2f, 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x12, 0x20, + 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, + 0x12, 0x80, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, + 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, + 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x1b, 0xda, 0x41, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x12, 0x95, 0x01, 0x0a, 0x0d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, + 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, - 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x3f, 0xda, 0x41, 0x14, - 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, - 0x6d, 0x61, 0x73, 0x6b, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x22, 0x12, 0x20, 0x0a, 0x10, 0x68, 0x6d, - 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x0c, - 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x1a, 0xa7, 0x02, 0xca, - 0x41, 0x16, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x8a, 0x02, 0x68, 0x74, 0x74, 0x70, - 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, - 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, - 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, - 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, - 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, - 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x2d, 0x6f, - 0x6e, 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 
0x2f, 0x61, - 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x66, - 0x75, 0x6c, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2c, 0x68, 0x74, 0x74, 0x70, - 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, - 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, + 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x31, 0xda, 0x41, 0x1d, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, + 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x77, 0x0a, 0x0d, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x25, 0xda, + 0x41, 0x11, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x2c, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x12, 0x7d, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, + 0x65, 0x79, 0x12, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, + 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, + 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x25, 0xda, 0x41, + 0x11, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x2c, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x12, 0x7c, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, + 0x65, 0x79, 0x73, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, + 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1b, 0xda, 0x41, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x12, 0x9d, 0x01, 0x0a, 0x0d, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, + 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, + 0x61, 0x63, 
0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x22, 0x3f, 0xda, 0x41, 0x14, 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x2c, 0x75, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x22, 0x12, + 0x20, 0x0a, 0x10, 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x12, 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, + 0x7d, 0x1a, 0xa7, 0x02, 0xca, 0x41, 0x16, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x8a, + 0x02, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, + 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, + 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, + 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x72, + 0x65, 0x61, 0x64, 0x2d, 0x6f, 0x6e, 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, + 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x72, 0x65, 0x61, 0x64, - 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x42, 0xe2, 0x01, 0xea, 0x41, 0x78, 0x0a, 0x21, 0x63, 0x6c, - 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, - 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x12, - 0x53, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, - 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, - 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x7d, 0x2f, 0x63, 0x72, 0x79, - 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, - 0x6b, 0x65, 0x79, 0x7d, 0x0a, 0x15, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x42, 0x0c, 0x53, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3e, 0x63, 0x6c, 0x6f, - 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, - 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, - 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x70, - 0x62, 0x3b, 0x73, 0x74, 0x6f, 0x72, 0x61, 
0x67, 0x65, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, + 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, + 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x42, 0xe2, 0x01, 0xea, 0x41, + 0x78, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, + 0x6f, 0x4b, 0x65, 0x79, 0x12, 0x53, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, + 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, + 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x7b, 0x63, 0x72, + 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x7d, 0x0a, 0x15, 0x63, 0x6f, 0x6d, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x42, 0x0c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x3e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2f, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x70, 0x62, 0x3b, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x70, 0x62, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -8843,7 +8967,7 @@ func file_google_storage_v2_storage_proto_rawDescGZIP() []byte { } var file_google_storage_v2_storage_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_google_storage_v2_storage_proto_msgTypes = make([]protoimpl.MessageInfo, 78) +var file_google_storage_v2_storage_proto_msgTypes = make([]protoimpl.MessageInfo, 79) var file_google_storage_v2_storage_proto_goTypes = []interface{}{ (ServiceConstants_Values)(0), // 0: google.storage.v2.ServiceConstants.Values (*DeleteBucketRequest)(nil), // 1: google.storage.v2.DeleteBucketRequest @@ -8917,31 +9041,32 @@ var file_google_storage_v2_storage_proto_goTypes = []interface{}{ (*Bucket_Website)(nil), // 69: google.storage.v2.Bucket.Website (*Bucket_CustomPlacementConfig)(nil), // 70: google.storage.v2.Bucket.CustomPlacementConfig (*Bucket_Autoclass)(nil), // 71: google.storage.v2.Bucket.Autoclass - nil, // 72: google.storage.v2.Bucket.LabelsEntry - (*Bucket_IamConfig_UniformBucketLevelAccess)(nil), // 73: google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess - (*Bucket_Lifecycle_Rule)(nil), // 74: google.storage.v2.Bucket.Lifecycle.Rule - (*Bucket_Lifecycle_Rule_Action)(nil), // 75: google.storage.v2.Bucket.Lifecycle.Rule.Action - (*Bucket_Lifecycle_Rule_Condition)(nil), // 76: google.storage.v2.Bucket.Lifecycle.Rule.Condition - nil, // 77: google.storage.v2.NotificationConfig.CustomAttributesEntry - nil, // 78: google.storage.v2.Object.MetadataEntry - (*fieldmaskpb.FieldMask)(nil), // 79: google.protobuf.FieldMask - (*timestamppb.Timestamp)(nil), // 80: google.protobuf.Timestamp - (*durationpb.Duration)(nil), // 81: google.protobuf.Duration - 
(*date.Date)(nil), // 82: google.type.Date - (*iampb.GetIamPolicyRequest)(nil), // 83: google.iam.v1.GetIamPolicyRequest - (*iampb.SetIamPolicyRequest)(nil), // 84: google.iam.v1.SetIamPolicyRequest - (*iampb.TestIamPermissionsRequest)(nil), // 85: google.iam.v1.TestIamPermissionsRequest - (*emptypb.Empty)(nil), // 86: google.protobuf.Empty - (*iampb.Policy)(nil), // 87: google.iam.v1.Policy - (*iampb.TestIamPermissionsResponse)(nil), // 88: google.iam.v1.TestIamPermissionsResponse + (*Bucket_HierarchicalNamespace)(nil), // 72: google.storage.v2.Bucket.HierarchicalNamespace + nil, // 73: google.storage.v2.Bucket.LabelsEntry + (*Bucket_IamConfig_UniformBucketLevelAccess)(nil), // 74: google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess + (*Bucket_Lifecycle_Rule)(nil), // 75: google.storage.v2.Bucket.Lifecycle.Rule + (*Bucket_Lifecycle_Rule_Action)(nil), // 76: google.storage.v2.Bucket.Lifecycle.Rule.Action + (*Bucket_Lifecycle_Rule_Condition)(nil), // 77: google.storage.v2.Bucket.Lifecycle.Rule.Condition + nil, // 78: google.storage.v2.NotificationConfig.CustomAttributesEntry + nil, // 79: google.storage.v2.Object.MetadataEntry + (*fieldmaskpb.FieldMask)(nil), // 80: google.protobuf.FieldMask + (*timestamppb.Timestamp)(nil), // 81: google.protobuf.Timestamp + (*durationpb.Duration)(nil), // 82: google.protobuf.Duration + (*date.Date)(nil), // 83: google.type.Date + (*iampb.GetIamPolicyRequest)(nil), // 84: google.iam.v1.GetIamPolicyRequest + (*iampb.SetIamPolicyRequest)(nil), // 85: google.iam.v1.SetIamPolicyRequest + (*iampb.TestIamPermissionsRequest)(nil), // 86: google.iam.v1.TestIamPermissionsRequest + (*emptypb.Empty)(nil), // 87: google.protobuf.Empty + (*iampb.Policy)(nil), // 88: google.iam.v1.Policy + (*iampb.TestIamPermissionsResponse)(nil), // 89: google.iam.v1.TestIamPermissionsResponse } var file_google_storage_v2_storage_proto_depIdxs = []int32{ - 79, // 0: google.storage.v2.GetBucketRequest.read_mask:type_name -> google.protobuf.FieldMask + 80, // 0: google.storage.v2.GetBucketRequest.read_mask:type_name -> google.protobuf.FieldMask 44, // 1: google.storage.v2.CreateBucketRequest.bucket:type_name -> google.storage.v2.Bucket - 79, // 2: google.storage.v2.ListBucketsRequest.read_mask:type_name -> google.protobuf.FieldMask + 80, // 2: google.storage.v2.ListBucketsRequest.read_mask:type_name -> google.protobuf.FieldMask 44, // 3: google.storage.v2.ListBucketsResponse.buckets:type_name -> google.storage.v2.Bucket 44, // 4: google.storage.v2.UpdateBucketRequest.bucket:type_name -> google.storage.v2.Bucket - 79, // 5: google.storage.v2.UpdateBucketRequest.update_mask:type_name -> google.protobuf.FieldMask + 80, // 5: google.storage.v2.UpdateBucketRequest.update_mask:type_name -> google.protobuf.FieldMask 49, // 6: google.storage.v2.CreateNotificationConfigRequest.notification_config:type_name -> google.storage.v2.NotificationConfig 49, // 7: google.storage.v2.ListNotificationConfigsResponse.notification_configs:type_name -> google.storage.v2.NotificationConfig 51, // 8: google.storage.v2.ComposeObjectRequest.destination:type_name -> google.storage.v2.Object @@ -8951,9 +9076,9 @@ var file_google_storage_v2_storage_proto_depIdxs = []int32{ 42, // 12: google.storage.v2.DeleteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams 42, // 13: google.storage.v2.RestoreObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams 42, // 14: 
google.storage.v2.ReadObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 79, // 15: google.storage.v2.ReadObjectRequest.read_mask:type_name -> google.protobuf.FieldMask + 80, // 15: google.storage.v2.ReadObjectRequest.read_mask:type_name -> google.protobuf.FieldMask 42, // 16: google.storage.v2.GetObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 79, // 17: google.storage.v2.GetObjectRequest.read_mask:type_name -> google.protobuf.FieldMask + 80, // 17: google.storage.v2.GetObjectRequest.read_mask:type_name -> google.protobuf.FieldMask 46, // 18: google.storage.v2.ReadObjectResponse.checksummed_data:type_name -> google.storage.v2.ChecksummedData 47, // 19: google.storage.v2.ReadObjectResponse.object_checksums:type_name -> google.storage.v2.ObjectChecksums 57, // 20: google.storage.v2.ReadObjectResponse.content_range:type_name -> google.storage.v2.ContentRange @@ -8969,7 +9094,7 @@ var file_google_storage_v2_storage_proto_depIdxs = []int32{ 47, // 30: google.storage.v2.BidiWriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums 42, // 31: google.storage.v2.BidiWriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams 51, // 32: google.storage.v2.BidiWriteObjectResponse.resource:type_name -> google.storage.v2.Object - 79, // 33: google.storage.v2.ListObjectsRequest.read_mask:type_name -> google.protobuf.FieldMask + 80, // 33: google.storage.v2.ListObjectsRequest.read_mask:type_name -> google.protobuf.FieldMask 42, // 34: google.storage.v2.QueryWriteStatusRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams 51, // 35: google.storage.v2.QueryWriteStatusResponse.resource:type_name -> google.storage.v2.Object 51, // 36: google.storage.v2.RewriteObjectRequest.destination:type_name -> google.storage.v2.Object @@ -8980,19 +9105,19 @@ var file_google_storage_v2_storage_proto_depIdxs = []int32{ 42, // 41: google.storage.v2.StartResumableWriteRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams 47, // 42: google.storage.v2.StartResumableWriteRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums 51, // 43: google.storage.v2.UpdateObjectRequest.object:type_name -> google.storage.v2.Object - 79, // 44: google.storage.v2.UpdateObjectRequest.update_mask:type_name -> google.protobuf.FieldMask + 80, // 44: google.storage.v2.UpdateObjectRequest.update_mask:type_name -> google.protobuf.FieldMask 42, // 45: google.storage.v2.UpdateObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams 48, // 46: google.storage.v2.CreateHmacKeyResponse.metadata:type_name -> google.storage.v2.HmacKeyMetadata 48, // 47: google.storage.v2.ListHmacKeysResponse.hmac_keys:type_name -> google.storage.v2.HmacKeyMetadata 48, // 48: google.storage.v2.UpdateHmacKeyRequest.hmac_key:type_name -> google.storage.v2.HmacKeyMetadata - 79, // 49: google.storage.v2.UpdateHmacKeyRequest.update_mask:type_name -> google.protobuf.FieldMask + 80, // 49: google.storage.v2.UpdateHmacKeyRequest.update_mask:type_name -> google.protobuf.FieldMask 45, // 50: google.storage.v2.Bucket.acl:type_name -> google.storage.v2.BucketAccessControl 52, // 51: google.storage.v2.Bucket.default_object_acl:type_name -> google.storage.v2.ObjectAccessControl 64, // 52: google.storage.v2.Bucket.lifecycle:type_name -> google.storage.v2.Bucket.Lifecycle - 80, // 
53: google.storage.v2.Bucket.create_time:type_name -> google.protobuf.Timestamp + 81, // 53: google.storage.v2.Bucket.create_time:type_name -> google.protobuf.Timestamp 61, // 54: google.storage.v2.Bucket.cors:type_name -> google.storage.v2.Bucket.Cors - 80, // 55: google.storage.v2.Bucket.update_time:type_name -> google.protobuf.Timestamp - 72, // 56: google.storage.v2.Bucket.labels:type_name -> google.storage.v2.Bucket.LabelsEntry + 81, // 55: google.storage.v2.Bucket.update_time:type_name -> google.protobuf.Timestamp + 73, // 56: google.storage.v2.Bucket.labels:type_name -> google.storage.v2.Bucket.LabelsEntry 69, // 57: google.storage.v2.Bucket.website:type_name -> google.storage.v2.Bucket.Website 68, // 58: google.storage.v2.Bucket.versioning:type_name -> google.storage.v2.Bucket.Versioning 65, // 59: google.storage.v2.Bucket.logging:type_name -> google.storage.v2.Bucket.Logging @@ -9003,108 +9128,111 @@ var file_google_storage_v2_storage_proto_depIdxs = []int32{ 63, // 64: google.storage.v2.Bucket.iam_config:type_name -> google.storage.v2.Bucket.IamConfig 70, // 65: google.storage.v2.Bucket.custom_placement_config:type_name -> google.storage.v2.Bucket.CustomPlacementConfig 71, // 66: google.storage.v2.Bucket.autoclass:type_name -> google.storage.v2.Bucket.Autoclass - 67, // 67: google.storage.v2.Bucket.soft_delete_policy:type_name -> google.storage.v2.Bucket.SoftDeletePolicy - 54, // 68: google.storage.v2.BucketAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam - 80, // 69: google.storage.v2.HmacKeyMetadata.create_time:type_name -> google.protobuf.Timestamp - 80, // 70: google.storage.v2.HmacKeyMetadata.update_time:type_name -> google.protobuf.Timestamp - 77, // 71: google.storage.v2.NotificationConfig.custom_attributes:type_name -> google.storage.v2.NotificationConfig.CustomAttributesEntry - 52, // 72: google.storage.v2.Object.acl:type_name -> google.storage.v2.ObjectAccessControl - 80, // 73: google.storage.v2.Object.delete_time:type_name -> google.protobuf.Timestamp - 80, // 74: google.storage.v2.Object.create_time:type_name -> google.protobuf.Timestamp - 47, // 75: google.storage.v2.Object.checksums:type_name -> google.storage.v2.ObjectChecksums - 80, // 76: google.storage.v2.Object.update_time:type_name -> google.protobuf.Timestamp - 80, // 77: google.storage.v2.Object.update_storage_class_time:type_name -> google.protobuf.Timestamp - 80, // 78: google.storage.v2.Object.retention_expire_time:type_name -> google.protobuf.Timestamp - 78, // 79: google.storage.v2.Object.metadata:type_name -> google.storage.v2.Object.MetadataEntry - 56, // 80: google.storage.v2.Object.owner:type_name -> google.storage.v2.Owner - 50, // 81: google.storage.v2.Object.customer_encryption:type_name -> google.storage.v2.CustomerEncryption - 80, // 82: google.storage.v2.Object.custom_time:type_name -> google.protobuf.Timestamp - 54, // 83: google.storage.v2.ObjectAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam - 51, // 84: google.storage.v2.ListObjectsResponse.objects:type_name -> google.storage.v2.Object - 59, // 85: google.storage.v2.ComposeObjectRequest.SourceObject.object_preconditions:type_name -> google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions - 73, // 86: google.storage.v2.Bucket.IamConfig.uniform_bucket_level_access:type_name -> google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess - 74, // 87: google.storage.v2.Bucket.Lifecycle.rule:type_name -> google.storage.v2.Bucket.Lifecycle.Rule - 80, // 88: 
google.storage.v2.Bucket.RetentionPolicy.effective_time:type_name -> google.protobuf.Timestamp - 81, // 89: google.storage.v2.Bucket.RetentionPolicy.retention_duration:type_name -> google.protobuf.Duration - 81, // 90: google.storage.v2.Bucket.SoftDeletePolicy.retention_duration:type_name -> google.protobuf.Duration - 80, // 91: google.storage.v2.Bucket.SoftDeletePolicy.effective_time:type_name -> google.protobuf.Timestamp - 80, // 92: google.storage.v2.Bucket.Autoclass.toggle_time:type_name -> google.protobuf.Timestamp - 80, // 93: google.storage.v2.Bucket.Autoclass.terminal_storage_class_update_time:type_name -> google.protobuf.Timestamp - 80, // 94: google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess.lock_time:type_name -> google.protobuf.Timestamp - 75, // 95: google.storage.v2.Bucket.Lifecycle.Rule.action:type_name -> google.storage.v2.Bucket.Lifecycle.Rule.Action - 76, // 96: google.storage.v2.Bucket.Lifecycle.Rule.condition:type_name -> google.storage.v2.Bucket.Lifecycle.Rule.Condition - 82, // 97: google.storage.v2.Bucket.Lifecycle.Rule.Condition.created_before:type_name -> google.type.Date - 82, // 98: google.storage.v2.Bucket.Lifecycle.Rule.Condition.custom_time_before:type_name -> google.type.Date - 82, // 99: google.storage.v2.Bucket.Lifecycle.Rule.Condition.noncurrent_time_before:type_name -> google.type.Date - 1, // 100: google.storage.v2.Storage.DeleteBucket:input_type -> google.storage.v2.DeleteBucketRequest - 2, // 101: google.storage.v2.Storage.GetBucket:input_type -> google.storage.v2.GetBucketRequest - 3, // 102: google.storage.v2.Storage.CreateBucket:input_type -> google.storage.v2.CreateBucketRequest - 4, // 103: google.storage.v2.Storage.ListBuckets:input_type -> google.storage.v2.ListBucketsRequest - 6, // 104: google.storage.v2.Storage.LockBucketRetentionPolicy:input_type -> google.storage.v2.LockBucketRetentionPolicyRequest - 83, // 105: google.storage.v2.Storage.GetIamPolicy:input_type -> google.iam.v1.GetIamPolicyRequest - 84, // 106: google.storage.v2.Storage.SetIamPolicy:input_type -> google.iam.v1.SetIamPolicyRequest - 85, // 107: google.storage.v2.Storage.TestIamPermissions:input_type -> google.iam.v1.TestIamPermissionsRequest - 7, // 108: google.storage.v2.Storage.UpdateBucket:input_type -> google.storage.v2.UpdateBucketRequest - 8, // 109: google.storage.v2.Storage.DeleteNotificationConfig:input_type -> google.storage.v2.DeleteNotificationConfigRequest - 9, // 110: google.storage.v2.Storage.GetNotificationConfig:input_type -> google.storage.v2.GetNotificationConfigRequest - 10, // 111: google.storage.v2.Storage.CreateNotificationConfig:input_type -> google.storage.v2.CreateNotificationConfigRequest - 11, // 112: google.storage.v2.Storage.ListNotificationConfigs:input_type -> google.storage.v2.ListNotificationConfigsRequest - 13, // 113: google.storage.v2.Storage.ComposeObject:input_type -> google.storage.v2.ComposeObjectRequest - 14, // 114: google.storage.v2.Storage.DeleteObject:input_type -> google.storage.v2.DeleteObjectRequest - 15, // 115: google.storage.v2.Storage.RestoreObject:input_type -> google.storage.v2.RestoreObjectRequest - 16, // 116: google.storage.v2.Storage.CancelResumableWrite:input_type -> google.storage.v2.CancelResumableWriteRequest - 19, // 117: google.storage.v2.Storage.GetObject:input_type -> google.storage.v2.GetObjectRequest - 18, // 118: google.storage.v2.Storage.ReadObject:input_type -> google.storage.v2.ReadObjectRequest - 33, // 119: google.storage.v2.Storage.UpdateObject:input_type -> 
google.storage.v2.UpdateObjectRequest - 22, // 120: google.storage.v2.Storage.WriteObject:input_type -> google.storage.v2.WriteObjectRequest - 24, // 121: google.storage.v2.Storage.BidiWriteObject:input_type -> google.storage.v2.BidiWriteObjectRequest - 26, // 122: google.storage.v2.Storage.ListObjects:input_type -> google.storage.v2.ListObjectsRequest - 29, // 123: google.storage.v2.Storage.RewriteObject:input_type -> google.storage.v2.RewriteObjectRequest - 31, // 124: google.storage.v2.Storage.StartResumableWrite:input_type -> google.storage.v2.StartResumableWriteRequest - 27, // 125: google.storage.v2.Storage.QueryWriteStatus:input_type -> google.storage.v2.QueryWriteStatusRequest - 34, // 126: google.storage.v2.Storage.GetServiceAccount:input_type -> google.storage.v2.GetServiceAccountRequest - 35, // 127: google.storage.v2.Storage.CreateHmacKey:input_type -> google.storage.v2.CreateHmacKeyRequest - 37, // 128: google.storage.v2.Storage.DeleteHmacKey:input_type -> google.storage.v2.DeleteHmacKeyRequest - 38, // 129: google.storage.v2.Storage.GetHmacKey:input_type -> google.storage.v2.GetHmacKeyRequest - 39, // 130: google.storage.v2.Storage.ListHmacKeys:input_type -> google.storage.v2.ListHmacKeysRequest - 41, // 131: google.storage.v2.Storage.UpdateHmacKey:input_type -> google.storage.v2.UpdateHmacKeyRequest - 86, // 132: google.storage.v2.Storage.DeleteBucket:output_type -> google.protobuf.Empty - 44, // 133: google.storage.v2.Storage.GetBucket:output_type -> google.storage.v2.Bucket - 44, // 134: google.storage.v2.Storage.CreateBucket:output_type -> google.storage.v2.Bucket - 5, // 135: google.storage.v2.Storage.ListBuckets:output_type -> google.storage.v2.ListBucketsResponse - 44, // 136: google.storage.v2.Storage.LockBucketRetentionPolicy:output_type -> google.storage.v2.Bucket - 87, // 137: google.storage.v2.Storage.GetIamPolicy:output_type -> google.iam.v1.Policy - 87, // 138: google.storage.v2.Storage.SetIamPolicy:output_type -> google.iam.v1.Policy - 88, // 139: google.storage.v2.Storage.TestIamPermissions:output_type -> google.iam.v1.TestIamPermissionsResponse - 44, // 140: google.storage.v2.Storage.UpdateBucket:output_type -> google.storage.v2.Bucket - 86, // 141: google.storage.v2.Storage.DeleteNotificationConfig:output_type -> google.protobuf.Empty - 49, // 142: google.storage.v2.Storage.GetNotificationConfig:output_type -> google.storage.v2.NotificationConfig - 49, // 143: google.storage.v2.Storage.CreateNotificationConfig:output_type -> google.storage.v2.NotificationConfig - 12, // 144: google.storage.v2.Storage.ListNotificationConfigs:output_type -> google.storage.v2.ListNotificationConfigsResponse - 51, // 145: google.storage.v2.Storage.ComposeObject:output_type -> google.storage.v2.Object - 86, // 146: google.storage.v2.Storage.DeleteObject:output_type -> google.protobuf.Empty - 51, // 147: google.storage.v2.Storage.RestoreObject:output_type -> google.storage.v2.Object - 17, // 148: google.storage.v2.Storage.CancelResumableWrite:output_type -> google.storage.v2.CancelResumableWriteResponse - 51, // 149: google.storage.v2.Storage.GetObject:output_type -> google.storage.v2.Object - 20, // 150: google.storage.v2.Storage.ReadObject:output_type -> google.storage.v2.ReadObjectResponse - 51, // 151: google.storage.v2.Storage.UpdateObject:output_type -> google.storage.v2.Object - 23, // 152: google.storage.v2.Storage.WriteObject:output_type -> google.storage.v2.WriteObjectResponse - 25, // 153: google.storage.v2.Storage.BidiWriteObject:output_type -> 
google.storage.v2.BidiWriteObjectResponse - 53, // 154: google.storage.v2.Storage.ListObjects:output_type -> google.storage.v2.ListObjectsResponse - 30, // 155: google.storage.v2.Storage.RewriteObject:output_type -> google.storage.v2.RewriteResponse - 32, // 156: google.storage.v2.Storage.StartResumableWrite:output_type -> google.storage.v2.StartResumableWriteResponse - 28, // 157: google.storage.v2.Storage.QueryWriteStatus:output_type -> google.storage.v2.QueryWriteStatusResponse - 55, // 158: google.storage.v2.Storage.GetServiceAccount:output_type -> google.storage.v2.ServiceAccount - 36, // 159: google.storage.v2.Storage.CreateHmacKey:output_type -> google.storage.v2.CreateHmacKeyResponse - 86, // 160: google.storage.v2.Storage.DeleteHmacKey:output_type -> google.protobuf.Empty - 48, // 161: google.storage.v2.Storage.GetHmacKey:output_type -> google.storage.v2.HmacKeyMetadata - 40, // 162: google.storage.v2.Storage.ListHmacKeys:output_type -> google.storage.v2.ListHmacKeysResponse - 48, // 163: google.storage.v2.Storage.UpdateHmacKey:output_type -> google.storage.v2.HmacKeyMetadata - 132, // [132:164] is the sub-list for method output_type - 100, // [100:132] is the sub-list for method input_type - 100, // [100:100] is the sub-list for extension type_name - 100, // [100:100] is the sub-list for extension extendee - 0, // [0:100] is the sub-list for field type_name + 72, // 67: google.storage.v2.Bucket.hierarchical_namespace:type_name -> google.storage.v2.Bucket.HierarchicalNamespace + 67, // 68: google.storage.v2.Bucket.soft_delete_policy:type_name -> google.storage.v2.Bucket.SoftDeletePolicy + 54, // 69: google.storage.v2.BucketAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam + 81, // 70: google.storage.v2.HmacKeyMetadata.create_time:type_name -> google.protobuf.Timestamp + 81, // 71: google.storage.v2.HmacKeyMetadata.update_time:type_name -> google.protobuf.Timestamp + 78, // 72: google.storage.v2.NotificationConfig.custom_attributes:type_name -> google.storage.v2.NotificationConfig.CustomAttributesEntry + 52, // 73: google.storage.v2.Object.acl:type_name -> google.storage.v2.ObjectAccessControl + 81, // 74: google.storage.v2.Object.delete_time:type_name -> google.protobuf.Timestamp + 81, // 75: google.storage.v2.Object.create_time:type_name -> google.protobuf.Timestamp + 47, // 76: google.storage.v2.Object.checksums:type_name -> google.storage.v2.ObjectChecksums + 81, // 77: google.storage.v2.Object.update_time:type_name -> google.protobuf.Timestamp + 81, // 78: google.storage.v2.Object.update_storage_class_time:type_name -> google.protobuf.Timestamp + 81, // 79: google.storage.v2.Object.retention_expire_time:type_name -> google.protobuf.Timestamp + 79, // 80: google.storage.v2.Object.metadata:type_name -> google.storage.v2.Object.MetadataEntry + 56, // 81: google.storage.v2.Object.owner:type_name -> google.storage.v2.Owner + 50, // 82: google.storage.v2.Object.customer_encryption:type_name -> google.storage.v2.CustomerEncryption + 81, // 83: google.storage.v2.Object.custom_time:type_name -> google.protobuf.Timestamp + 81, // 84: google.storage.v2.Object.soft_delete_time:type_name -> google.protobuf.Timestamp + 81, // 85: google.storage.v2.Object.hard_delete_time:type_name -> google.protobuf.Timestamp + 54, // 86: google.storage.v2.ObjectAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam + 51, // 87: google.storage.v2.ListObjectsResponse.objects:type_name -> google.storage.v2.Object + 59, // 88: 
google.storage.v2.ComposeObjectRequest.SourceObject.object_preconditions:type_name -> google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions + 74, // 89: google.storage.v2.Bucket.IamConfig.uniform_bucket_level_access:type_name -> google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess + 75, // 90: google.storage.v2.Bucket.Lifecycle.rule:type_name -> google.storage.v2.Bucket.Lifecycle.Rule + 81, // 91: google.storage.v2.Bucket.RetentionPolicy.effective_time:type_name -> google.protobuf.Timestamp + 82, // 92: google.storage.v2.Bucket.RetentionPolicy.retention_duration:type_name -> google.protobuf.Duration + 82, // 93: google.storage.v2.Bucket.SoftDeletePolicy.retention_duration:type_name -> google.protobuf.Duration + 81, // 94: google.storage.v2.Bucket.SoftDeletePolicy.effective_time:type_name -> google.protobuf.Timestamp + 81, // 95: google.storage.v2.Bucket.Autoclass.toggle_time:type_name -> google.protobuf.Timestamp + 81, // 96: google.storage.v2.Bucket.Autoclass.terminal_storage_class_update_time:type_name -> google.protobuf.Timestamp + 81, // 97: google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess.lock_time:type_name -> google.protobuf.Timestamp + 76, // 98: google.storage.v2.Bucket.Lifecycle.Rule.action:type_name -> google.storage.v2.Bucket.Lifecycle.Rule.Action + 77, // 99: google.storage.v2.Bucket.Lifecycle.Rule.condition:type_name -> google.storage.v2.Bucket.Lifecycle.Rule.Condition + 83, // 100: google.storage.v2.Bucket.Lifecycle.Rule.Condition.created_before:type_name -> google.type.Date + 83, // 101: google.storage.v2.Bucket.Lifecycle.Rule.Condition.custom_time_before:type_name -> google.type.Date + 83, // 102: google.storage.v2.Bucket.Lifecycle.Rule.Condition.noncurrent_time_before:type_name -> google.type.Date + 1, // 103: google.storage.v2.Storage.DeleteBucket:input_type -> google.storage.v2.DeleteBucketRequest + 2, // 104: google.storage.v2.Storage.GetBucket:input_type -> google.storage.v2.GetBucketRequest + 3, // 105: google.storage.v2.Storage.CreateBucket:input_type -> google.storage.v2.CreateBucketRequest + 4, // 106: google.storage.v2.Storage.ListBuckets:input_type -> google.storage.v2.ListBucketsRequest + 6, // 107: google.storage.v2.Storage.LockBucketRetentionPolicy:input_type -> google.storage.v2.LockBucketRetentionPolicyRequest + 84, // 108: google.storage.v2.Storage.GetIamPolicy:input_type -> google.iam.v1.GetIamPolicyRequest + 85, // 109: google.storage.v2.Storage.SetIamPolicy:input_type -> google.iam.v1.SetIamPolicyRequest + 86, // 110: google.storage.v2.Storage.TestIamPermissions:input_type -> google.iam.v1.TestIamPermissionsRequest + 7, // 111: google.storage.v2.Storage.UpdateBucket:input_type -> google.storage.v2.UpdateBucketRequest + 8, // 112: google.storage.v2.Storage.DeleteNotificationConfig:input_type -> google.storage.v2.DeleteNotificationConfigRequest + 9, // 113: google.storage.v2.Storage.GetNotificationConfig:input_type -> google.storage.v2.GetNotificationConfigRequest + 10, // 114: google.storage.v2.Storage.CreateNotificationConfig:input_type -> google.storage.v2.CreateNotificationConfigRequest + 11, // 115: google.storage.v2.Storage.ListNotificationConfigs:input_type -> google.storage.v2.ListNotificationConfigsRequest + 13, // 116: google.storage.v2.Storage.ComposeObject:input_type -> google.storage.v2.ComposeObjectRequest + 14, // 117: google.storage.v2.Storage.DeleteObject:input_type -> google.storage.v2.DeleteObjectRequest + 15, // 118: google.storage.v2.Storage.RestoreObject:input_type -> 
google.storage.v2.RestoreObjectRequest + 16, // 119: google.storage.v2.Storage.CancelResumableWrite:input_type -> google.storage.v2.CancelResumableWriteRequest + 19, // 120: google.storage.v2.Storage.GetObject:input_type -> google.storage.v2.GetObjectRequest + 18, // 121: google.storage.v2.Storage.ReadObject:input_type -> google.storage.v2.ReadObjectRequest + 33, // 122: google.storage.v2.Storage.UpdateObject:input_type -> google.storage.v2.UpdateObjectRequest + 22, // 123: google.storage.v2.Storage.WriteObject:input_type -> google.storage.v2.WriteObjectRequest + 24, // 124: google.storage.v2.Storage.BidiWriteObject:input_type -> google.storage.v2.BidiWriteObjectRequest + 26, // 125: google.storage.v2.Storage.ListObjects:input_type -> google.storage.v2.ListObjectsRequest + 29, // 126: google.storage.v2.Storage.RewriteObject:input_type -> google.storage.v2.RewriteObjectRequest + 31, // 127: google.storage.v2.Storage.StartResumableWrite:input_type -> google.storage.v2.StartResumableWriteRequest + 27, // 128: google.storage.v2.Storage.QueryWriteStatus:input_type -> google.storage.v2.QueryWriteStatusRequest + 34, // 129: google.storage.v2.Storage.GetServiceAccount:input_type -> google.storage.v2.GetServiceAccountRequest + 35, // 130: google.storage.v2.Storage.CreateHmacKey:input_type -> google.storage.v2.CreateHmacKeyRequest + 37, // 131: google.storage.v2.Storage.DeleteHmacKey:input_type -> google.storage.v2.DeleteHmacKeyRequest + 38, // 132: google.storage.v2.Storage.GetHmacKey:input_type -> google.storage.v2.GetHmacKeyRequest + 39, // 133: google.storage.v2.Storage.ListHmacKeys:input_type -> google.storage.v2.ListHmacKeysRequest + 41, // 134: google.storage.v2.Storage.UpdateHmacKey:input_type -> google.storage.v2.UpdateHmacKeyRequest + 87, // 135: google.storage.v2.Storage.DeleteBucket:output_type -> google.protobuf.Empty + 44, // 136: google.storage.v2.Storage.GetBucket:output_type -> google.storage.v2.Bucket + 44, // 137: google.storage.v2.Storage.CreateBucket:output_type -> google.storage.v2.Bucket + 5, // 138: google.storage.v2.Storage.ListBuckets:output_type -> google.storage.v2.ListBucketsResponse + 44, // 139: google.storage.v2.Storage.LockBucketRetentionPolicy:output_type -> google.storage.v2.Bucket + 88, // 140: google.storage.v2.Storage.GetIamPolicy:output_type -> google.iam.v1.Policy + 88, // 141: google.storage.v2.Storage.SetIamPolicy:output_type -> google.iam.v1.Policy + 89, // 142: google.storage.v2.Storage.TestIamPermissions:output_type -> google.iam.v1.TestIamPermissionsResponse + 44, // 143: google.storage.v2.Storage.UpdateBucket:output_type -> google.storage.v2.Bucket + 87, // 144: google.storage.v2.Storage.DeleteNotificationConfig:output_type -> google.protobuf.Empty + 49, // 145: google.storage.v2.Storage.GetNotificationConfig:output_type -> google.storage.v2.NotificationConfig + 49, // 146: google.storage.v2.Storage.CreateNotificationConfig:output_type -> google.storage.v2.NotificationConfig + 12, // 147: google.storage.v2.Storage.ListNotificationConfigs:output_type -> google.storage.v2.ListNotificationConfigsResponse + 51, // 148: google.storage.v2.Storage.ComposeObject:output_type -> google.storage.v2.Object + 87, // 149: google.storage.v2.Storage.DeleteObject:output_type -> google.protobuf.Empty + 51, // 150: google.storage.v2.Storage.RestoreObject:output_type -> google.storage.v2.Object + 17, // 151: google.storage.v2.Storage.CancelResumableWrite:output_type -> google.storage.v2.CancelResumableWriteResponse + 51, // 152: 
google.storage.v2.Storage.GetObject:output_type -> google.storage.v2.Object + 20, // 153: google.storage.v2.Storage.ReadObject:output_type -> google.storage.v2.ReadObjectResponse + 51, // 154: google.storage.v2.Storage.UpdateObject:output_type -> google.storage.v2.Object + 23, // 155: google.storage.v2.Storage.WriteObject:output_type -> google.storage.v2.WriteObjectResponse + 25, // 156: google.storage.v2.Storage.BidiWriteObject:output_type -> google.storage.v2.BidiWriteObjectResponse + 53, // 157: google.storage.v2.Storage.ListObjects:output_type -> google.storage.v2.ListObjectsResponse + 30, // 158: google.storage.v2.Storage.RewriteObject:output_type -> google.storage.v2.RewriteResponse + 32, // 159: google.storage.v2.Storage.StartResumableWrite:output_type -> google.storage.v2.StartResumableWriteResponse + 28, // 160: google.storage.v2.Storage.QueryWriteStatus:output_type -> google.storage.v2.QueryWriteStatusResponse + 55, // 161: google.storage.v2.Storage.GetServiceAccount:output_type -> google.storage.v2.ServiceAccount + 36, // 162: google.storage.v2.Storage.CreateHmacKey:output_type -> google.storage.v2.CreateHmacKeyResponse + 87, // 163: google.storage.v2.Storage.DeleteHmacKey:output_type -> google.protobuf.Empty + 48, // 164: google.storage.v2.Storage.GetHmacKey:output_type -> google.storage.v2.HmacKeyMetadata + 40, // 165: google.storage.v2.Storage.ListHmacKeys:output_type -> google.storage.v2.ListHmacKeysResponse + 48, // 166: google.storage.v2.Storage.UpdateHmacKey:output_type -> google.storage.v2.HmacKeyMetadata + 135, // [135:167] is the sub-list for method output_type + 103, // [103:135] is the sub-list for method input_type + 103, // [103:103] is the sub-list for extension type_name + 103, // [103:103] is the sub-list for extension extendee + 0, // [0:103] is the sub-list for field type_name } func init() { file_google_storage_v2_storage_proto_init() } @@ -9965,8 +10093,8 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[72].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_IamConfig_UniformBucketLevelAccess); i { + file_google_storage_v2_storage_proto_msgTypes[71].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bucket_HierarchicalNamespace); i { case 0: return &v.state case 1: @@ -9978,7 +10106,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[73].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_Lifecycle_Rule); i { + switch v := v.(*Bucket_IamConfig_UniformBucketLevelAccess); i { case 0: return &v.state case 1: @@ -9990,7 +10118,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[74].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_Lifecycle_Rule_Action); i { + switch v := v.(*Bucket_Lifecycle_Rule); i { case 0: return &v.state case 1: @@ -10002,6 +10130,18 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[75].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bucket_Lifecycle_Rule_Action); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[76].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Bucket_Lifecycle_Rule_Condition); i { case 0: return &v.state @@ -10055,14 +10195,14 @@ 
func file_google_storage_v2_storage_proto_init() { file_google_storage_v2_storage_proto_msgTypes[58].OneofWrappers = []interface{}{} file_google_storage_v2_storage_proto_msgTypes[66].OneofWrappers = []interface{}{} file_google_storage_v2_storage_proto_msgTypes[70].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[75].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[76].OneofWrappers = []interface{}{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_storage_v2_storage_proto_rawDesc, NumEnums: 1, - NumMessages: 78, + NumMessages: 79, NumExtensions: 0, NumServices: 1, }, @@ -10099,15 +10239,13 @@ type StorageClient interface { ListBuckets(ctx context.Context, in *ListBucketsRequest, opts ...grpc.CallOption) (*ListBucketsResponse, error) // Locks retention policy on a bucket. LockBucketRetentionPolicy(ctx context.Context, in *LockBucketRetentionPolicyRequest, opts ...grpc.CallOption) (*Bucket, error) - // Gets the IAM policy for a specified bucket or object. + // Gets the IAM policy for a specified bucket. // The `resource` field in the request should be - // `projects/_/buckets/{bucket}` for a bucket or - // `projects/_/buckets/{bucket}/objects/{object}` for an object. + // `projects/_/buckets/{bucket}`. GetIamPolicy(ctx context.Context, in *iampb.GetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error) - // Updates an IAM policy for the specified bucket or object. + // Updates an IAM policy for the specified bucket. // The `resource` field in the request should be - // `projects/_/buckets/{bucket}` for a bucket or - // `projects/_/buckets/{bucket}/objects/{object}` for an object. + // `projects/_/buckets/{bucket}`. SetIamPolicy(ctx context.Context, in *iampb.SetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error) // Tests a set of permissions on the given bucket or object to see which, if // any, are held by the caller. @@ -10647,15 +10785,13 @@ type StorageServer interface { ListBuckets(context.Context, *ListBucketsRequest) (*ListBucketsResponse, error) // Locks retention policy on a bucket. LockBucketRetentionPolicy(context.Context, *LockBucketRetentionPolicyRequest) (*Bucket, error) - // Gets the IAM policy for a specified bucket or object. + // Gets the IAM policy for a specified bucket. // The `resource` field in the request should be - // `projects/_/buckets/{bucket}` for a bucket or - // `projects/_/buckets/{bucket}/objects/{object}` for an object. + // `projects/_/buckets/{bucket}`. GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest) (*iampb.Policy, error) - // Updates an IAM policy for the specified bucket or object. + // Updates an IAM policy for the specified bucket. // The `resource` field in the request should be - // `projects/_/buckets/{bucket}` for a bucket or - // `projects/_/buckets/{bucket}/objects/{object}` for an object. + // `projects/_/buckets/{bucket}`. SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest) (*iampb.Policy, error) // Tests a set of permissions on the given bucket or object to see which, if // any, are held by the caller. 
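The regenerated interface above narrows `GetIamPolicy` and `SetIamPolicy` to bucket resources, so callers must pass the `projects/_/buckets/{bucket}` form rather than an object path. Below is a minimal sketch of what a caller of the regenerated `storagepb.StorageClient` could look like, using the method signature shown in the diff; the `iampb` import path, the helper name, and the bucket value are assumptions for illustration and not part of this patch:

```go
package storageexample

import (
	"context"
	"fmt"

	"cloud.google.com/go/storage/internal/apiv2/storagepb" // generated package updated in this diff
	iampb "google.golang.org/genproto/googleapis/iam/v1"   // assumed import path for the IAM request/response types
)

// bucketIAMPolicy fetches the IAM policy for a bucket using the bucket-scoped
// resource format the updated doc comments require: "projects/_/buckets/{bucket}".
func bucketIAMPolicy(ctx context.Context, c storagepb.StorageClient, bucket string) (*iampb.Policy, error) {
	return c.GetIamPolicy(ctx, &iampb.GetIamPolicyRequest{
		Resource: fmt.Sprintf("projects/_/buckets/%s", bucket),
	})
}
```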
diff --git a/vendor/cloud.google.com/go/storage/internal/version.go b/vendor/cloud.google.com/go/storage/internal/version.go index 2d5cf890e064b..1c52a3504b2c9 100644 --- a/vendor/cloud.google.com/go/storage/internal/version.go +++ b/vendor/cloud.google.com/go/storage/internal/version.go @@ -15,4 +15,4 @@ package internal // Version is the current tagged release of the library. -const Version = "1.36.0" +const Version = "1.40.0" diff --git a/vendor/cloud.google.com/go/storage/invoke.go b/vendor/cloud.google.com/go/storage/invoke.go index dc79fd88bbc49..1b52eb5d2c659 100644 --- a/vendor/cloud.google.com/go/storage/invoke.go +++ b/vendor/cloud.google.com/go/storage/invoke.go @@ -70,6 +70,9 @@ func run(ctx context.Context, call func(ctx context.Context) error, retry *retry return internal.Retry(ctx, bo, func() (stop bool, err error) { ctxWithHeaders := setInvocationHeaders(ctx, invocationID, attempts) err = call(ctxWithHeaders) + if retry.maxAttempts != nil && attempts >= *retry.maxAttempts { + return true, err + } attempts++ return !errorFunc(err), err }) diff --git a/vendor/cloud.google.com/go/storage/reader.go b/vendor/cloud.google.com/go/storage/reader.go index 4673a68d07899..0b228a6a76c9c 100644 --- a/vendor/cloud.google.com/go/storage/reader.go +++ b/vendor/cloud.google.com/go/storage/reader.go @@ -198,9 +198,7 @@ var emptyBody = ioutil.NopCloser(strings.NewReader("")) type Reader struct { Attrs ReaderObjectAttrs seen, remain, size int64 - checkCRC bool // should we check the CRC? - wantCRC uint32 // the CRC32c value the server sent in the header - gotCRC uint32 // running crc + checkCRC bool // Did we check the CRC? This is now only used by tests. reader io.ReadCloser ctx context.Context @@ -218,17 +216,17 @@ func (r *Reader) Read(p []byte) (int, error) { if r.remain != -1 { r.remain -= int64(n) } - if r.checkCRC { - r.gotCRC = crc32.Update(r.gotCRC, crc32cTable, p[:n]) - // Check CRC here. It would be natural to check it in Close, but - // everybody defers Close on the assumption that it doesn't return - // anything worth looking at. - if err == io.EOF { - if r.gotCRC != r.wantCRC { - return n, fmt.Errorf("storage: bad CRC on read: got %d, want %d", - r.gotCRC, r.wantCRC) - } - } + return n, err +} + +// WriteTo writes all the data from the Reader to w. Fulfills the io.WriterTo interface. +// This is called implicitly when calling io.Copy on a Reader. +func (r *Reader) WriteTo(w io.Writer) (int64, error) { + // This implicitly calls r.reader.WriteTo for gRPC only. JSON and XML don't have an + // implementation of WriteTo. + n, err := io.Copy(w, r.reader) + if r.remain != -1 { + r.remain -= int64(n) } return n, err } diff --git a/vendor/cloud.google.com/go/storage/storage.go b/vendor/cloud.google.com/go/storage/storage.go index 78ecbf0e892a5..c01085f35ddac 100644 --- a/vendor/cloud.google.com/go/storage/storage.go +++ b/vendor/cloud.google.com/go/storage/storage.go @@ -146,8 +146,10 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error // Prepend default options to avoid overriding options passed by the user. opts = append([]option.ClientOption{option.WithScopes(ScopeFullControl, "https://www.googleapis.com/auth/cloud-platform"), option.WithUserAgent(userAgent)}, opts...) 
- opts = append(opts, internaloption.WithDefaultEndpoint("https://storage.googleapis.com/storage/v1/")) - opts = append(opts, internaloption.WithDefaultMTLSEndpoint("https://storage.mtls.googleapis.com/storage/v1/")) + opts = append(opts, internaloption.WithDefaultEndpointTemplate("https://storage.UNIVERSE_DOMAIN/storage/v1/"), + internaloption.WithDefaultMTLSEndpoint("https://storage.mtls.googleapis.com/storage/v1/"), + internaloption.WithDefaultUniverseDomain("googleapis.com"), + ) // Don't error out here. The user may have passed in their own HTTP // client which does not auth with ADC or other common conventions. @@ -217,6 +219,8 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error // NewGRPCClient creates a new Storage client using the gRPC transport and API. // Client methods which have not been implemented in gRPC will return an error. // In particular, methods for Cloud Pub/Sub notifications are not supported. +// Using a non-default universe domain is also not supported with the Storage +// gRPC client. // // The storage gRPC API is still in preview and not yet publicly available. // If you would like to use the API, please first contact your GCP account rep to @@ -299,7 +303,11 @@ func (s pathStyle) host(hostname, bucket string) string { return "storage.googleapis.com" } -func (s virtualHostedStyle) host(_, bucket string) string { +func (s virtualHostedStyle) host(hostname, bucket string) string { + if hostname != "" { + return bucket + "." + stripScheme(hostname) + } + if host := os.Getenv("STORAGE_EMULATOR_HOST"); host != "" { return bucket + "." + stripScheme(host) } @@ -455,7 +463,7 @@ type SignedURLOptions struct { // Hostname sets the host of the signed URL. This field overrides any // endpoint set on a storage Client or through STORAGE_EMULATOR_HOST. - // Only compatible with PathStyle URLStyle. + // Only compatible with PathStyle and VirtualHostedStyle URLStyles. // Optional. Hostname string } @@ -743,7 +751,7 @@ func signedURLV4(bucket, name string, opts *SignedURLOptions, now time.Time) (st } var headersWithValue []string - headersWithValue = append(headersWithValue, "host:"+u.Host) + headersWithValue = append(headersWithValue, "host:"+u.Hostname()) headersWithValue = append(headersWithValue, opts.Headers...) if opts.ContentType != "" { headersWithValue = append(headersWithValue, "content-type:"+opts.ContentType) @@ -1088,6 +1096,10 @@ func (o *ObjectHandle) validate() error { if !utf8.ValidString(o.object) { return fmt.Errorf("storage: object name %q is not valid UTF-8", o.object) } + // Names . and .. are not valid; see https://cloud.google.com/storage/docs/objects#naming + if o.object == "." || o.object == ".." { + return fmt.Errorf("storage: object name %q is not valid", o.object) + } return nil } @@ -1620,6 +1632,11 @@ type Query struct { // for syntax details. When Delimiter is set in conjunction with MatchGlob, // it must be set to /. MatchGlob string + + // IncludeFoldersAsPrefixes includes Folders and Managed Folders in the set of + // prefixes returned by the query. Only applicable if Delimiter is set to /. + // IncludeFoldersAsPrefixes is not yet implemented in the gRPC API. + IncludeFoldersAsPrefixes bool } // attrToFieldMap maps the field names of ObjectAttrs to the underlying field @@ -2076,6 +2093,26 @@ func (wb *withBackoff) apply(config *retryConfig) { config.backoff = &wb.backoff } +// WithMaxAttempts configures the maximum number of times an API call can be made +// in the case of retryable errors. 
+// For example, if you set WithMaxAttempts(5), the operation will be attempted up to 5 +// times total (initial call plus 4 retries). +// Without this setting, operations will continue retrying indefinitely +// until either the context is canceled or a deadline is reached. +func WithMaxAttempts(maxAttempts int) RetryOption { + return &withMaxAttempts{ + maxAttempts: maxAttempts, + } +} + +type withMaxAttempts struct { + maxAttempts int +} + +func (wb *withMaxAttempts) apply(config *retryConfig) { + config.maxAttempts = &wb.maxAttempts +} + // RetryPolicy describes the available policies for which operations should be // retried. The default is `RetryIdempotent`. type RetryPolicy int @@ -2148,6 +2185,7 @@ type retryConfig struct { backoff *gax.Backoff policy RetryPolicy shouldRetry func(err error) bool + maxAttempts *int } func (r *retryConfig) clone() *retryConfig { @@ -2168,6 +2206,7 @@ func (r *retryConfig) clone() *retryConfig { backoff: bo, policy: r.policy, shouldRetry: r.shouldRetry, + maxAttempts: r.maxAttempts, } } diff --git a/vendor/cloud.google.com/go/storage/writer.go b/vendor/cloud.google.com/go/storage/writer.go index aeb7ed418c8d6..43a0f0d109374 100644 --- a/vendor/cloud.google.com/go/storage/writer.go +++ b/vendor/cloud.google.com/go/storage/writer.go @@ -88,6 +88,11 @@ type Writer struct { // cancellation. ChunkRetryDeadline time.Duration + // ForceEmptyContentType is an optional parameter that is used to disable + // auto-detection of Content-Type. By default, if a blank Content-Type + // is provided, then gax.DetermineContentType is called to sniff the type. + ForceEmptyContentType bool + // ProgressFunc can be used to monitor the progress of a large write // operation. If ProgressFunc is not nil and writing requires multiple // calls to the underlying service (see @@ -180,18 +185,19 @@ func (w *Writer) openWriter() (err error) { isIdempotent := w.o.conds != nil && (w.o.conds.GenerationMatch >= 0 || w.o.conds.DoesNotExist == true) opts := makeStorageOpts(isIdempotent, w.o.retry, w.o.userProject) params := &openWriterParams{ - ctx: w.ctx, - chunkSize: w.ChunkSize, - chunkRetryDeadline: w.ChunkRetryDeadline, - bucket: w.o.bucket, - attrs: &w.ObjectAttrs, - conds: w.o.conds, - encryptionKey: w.o.encryptionKey, - sendCRC32C: w.SendCRC32C, - donec: w.donec, - setError: w.error, - progress: w.progress, - setObj: func(o *ObjectAttrs) { w.obj = o }, + ctx: w.ctx, + chunkSize: w.ChunkSize, + chunkRetryDeadline: w.ChunkRetryDeadline, + bucket: w.o.bucket, + attrs: &w.ObjectAttrs, + conds: w.o.conds, + encryptionKey: w.o.encryptionKey, + sendCRC32C: w.SendCRC32C, + donec: w.donec, + setError: w.error, + progress: w.progress, + setObj: func(o *ObjectAttrs) { w.obj = o }, + forceEmptyContentType: w.ForceEmptyContentType, } if err := w.ctx.Err(); err != nil { return err // short-circuit diff --git a/vendor/github.com/goccy/go-json/.codecov.yml b/vendor/github.com/goccy/go-json/.codecov.yml new file mode 100644 index 0000000000000..e98134570c4f3 --- /dev/null +++ b/vendor/github.com/goccy/go-json/.codecov.yml @@ -0,0 +1,32 @@ +codecov: + require_ci_to_pass: yes + +coverage: + precision: 2 + round: down + range: "70...100" + + status: + project: + default: + target: 70% + threshold: 2% + patch: off + changes: no + +parsers: + gcov: + branch_detection: + conditional: yes + loop: yes + method: no + macro: no + +comment: + layout: "header,diff" + behavior: default + require_changes: no + +ignore: + - internal/encoder/vm_color + - internal/encoder/vm_color_indent diff --git 
a/vendor/github.com/goccy/go-json/.gitignore b/vendor/github.com/goccy/go-json/.gitignore new file mode 100644 index 0000000000000..378283829cfae --- /dev/null +++ b/vendor/github.com/goccy/go-json/.gitignore @@ -0,0 +1,2 @@ +cover.html +cover.out diff --git a/vendor/github.com/goccy/go-json/.golangci.yml b/vendor/github.com/goccy/go-json/.golangci.yml new file mode 100644 index 0000000000000..977accaa9f4fe --- /dev/null +++ b/vendor/github.com/goccy/go-json/.golangci.yml @@ -0,0 +1,86 @@ +run: + skip-files: + - encode_optype.go + - ".*_test\\.go$" + +linters-settings: + govet: + enable-all: true + disable: + - shadow + +linters: + enable-all: true + disable: + - dogsled + - dupl + - exhaustive + - exhaustivestruct + - errorlint + - forbidigo + - funlen + - gci + - gochecknoglobals + - gochecknoinits + - gocognit + - gocritic + - gocyclo + - godot + - godox + - goerr113 + - gofumpt + - gomnd + - gosec + - ifshort + - lll + - makezero + - nakedret + - nestif + - nlreturn + - paralleltest + - testpackage + - thelper + - wrapcheck + - interfacer + - lll + - nakedret + - nestif + - nlreturn + - testpackage + - wsl + - varnamelen + - nilnil + - ireturn + - govet + - forcetypeassert + - cyclop + - containedctx + - revive + - nosnakecase + - exhaustruct + - depguard + +issues: + exclude-rules: + # not needed + - path: /*.go + text: "ST1003: should not use underscores in package names" + linters: + - stylecheck + - path: /*.go + text: "don't use an underscore in package name" + linters: + - golint + - path: rtype.go + linters: + - golint + - stylecheck + - path: error.go + linters: + - staticcheck + + # Maximum issues count per one linter. Set to 0 to disable. Default is 50. + max-issues-per-linter: 0 + + # Maximum count of issues with the same text. Set to 0 to disable. Default is 3. 
+ max-same-issues: 0 diff --git a/vendor/github.com/goccy/go-json/CHANGELOG.md b/vendor/github.com/goccy/go-json/CHANGELOG.md new file mode 100644 index 0000000000000..d09bb89c318ae --- /dev/null +++ b/vendor/github.com/goccy/go-json/CHANGELOG.md @@ -0,0 +1,425 @@ +# v0.10.2 - 2023/03/20 + +### New features + +* Support DebugDOT option for debugging encoder ( #440 ) + +### Fix bugs + +* Fix combination of embedding structure and omitempty option ( #442 ) + +# v0.10.1 - 2023/03/13 + +### Fix bugs + +* Fix checkptr error for array decoder ( #415 ) +* Fix added buffer size check when decoding key ( #430 ) +* Fix handling of anonymous fields other than struct ( #431 ) +* Fix to not optimize when lower conversion can't handle byte-by-byte ( #432 ) +* Fix a problem that MarshalIndent does not work when UnorderedMap is specified ( #435 ) +* Fix mapDecoder.DecodeStream() for empty objects containing whitespace ( #425 ) +* Fix an issue that could not set the correct NextField for fields in the embedded structure ( #438 ) + +# v0.10.0 - 2022/11/29 + +### New features + +* Support JSON Path ( #250 ) + +### Fix bugs + +* Fix marshaler for map's key ( #409 ) + +# v0.9.11 - 2022/08/18 + +### Fix bugs + +* Fix unexpected behavior when buffer ends with backslash ( #383 ) +* Fix stream decoding of escaped character ( #387 ) + +# v0.9.10 - 2022/07/15 + +### Fix bugs + +* Fix boundary exception of type caching ( #382 ) + +# v0.9.9 - 2022/07/15 + +### Fix bugs + +* Fix encoding of directed interface with typed nil ( #377 ) +* Fix embedded primitive type encoding using alias ( #378 ) +* Fix slice/array type encoding with types implementing MarshalJSON ( #379 ) +* Fix unicode decoding when the expected buffer state is not met after reading ( #380 ) + +# v0.9.8 - 2022/06/30 + +### Fix bugs + +* Fix decoding of surrogate-pair ( #365 ) +* Fix handling of embedded primitive type ( #366 ) +* Add validation of escape sequence for decoder ( #367 ) +* Fix stream tokenizing respecting UseNumber ( #369 ) +* Fix encoding when struct pointer type that implements Marshal JSON is embedded ( #375 ) + +### Improve performance + +* Improve performance of linkRecursiveCode ( #368 ) + +# v0.9.7 - 2022/04/22 + +### Fix bugs + +#### Encoder + +* Add filtering process for encoding on slow path ( #355 ) +* Fix encoding of interface{} with pointer type ( #363 ) + +#### Decoder + +* Fix map key decoder that implements UnmarshalJSON ( #353 ) +* Fix decoding of []uint8 type ( #361 ) + +### New features + +* Add DebugWith option for encoder ( #356 ) + +# v0.9.6 - 2022/03/22 + +### Fix bugs + +* Correct the handling of the minimum value of int type for decoder ( #344 ) +* Fix bugs of stream decoder's bufferSize ( #349 ) +* Add a guard to use typeptr more safely ( #351 ) + +### Improve decoder performance + +* Improve escapeString's performance ( #345 ) + +### Others + +* Update go version for CI ( #347 ) + +# v0.9.5 - 2022/03/04 + +### Fix bugs + +* Fix panic when decoding time.Time with context ( #328 ) +* Fix reading the next character in buffer to nul consideration ( #338 ) +* Fix incorrect handling on skipValue ( #341 ) + +### Improve decoder performance + +* Improve performance when a payload contains escape sequence ( #334 ) + +# v0.9.4 - 2022/01/21 + +* Fix IsNilForMarshaler for string type with omitempty ( #323 ) +* Fix the case where the embedded field is at the end ( #326 ) + +# v0.9.3 - 2022/01/14 + +* Fix logic of removing struct field for decoder ( #322 ) + +# v0.9.2 - 2022/01/14 + +* Add invalid decoder to delay type error 
judgment at decode ( #321 ) + +# v0.9.1 - 2022/01/11 + +* Fix encoding of MarshalText/MarshalJSON operation with head offset ( #319 ) + +# v0.9.0 - 2022/01/05 + +### New feature + +* Supports dynamic filtering of struct fields ( #314 ) + +### Improve encoding performance + +* Improve map encoding performance ( #310 ) +* Optimize encoding path for escaped string ( #311 ) +* Add encoding option for performance ( #312 ) + +### Fix bugs + +* Fix panic at encoding map value on 1.18 ( #310 ) +* Fix MarshalIndent for interface type ( #317 ) + +# v0.8.1 - 2021/12/05 + +* Fix operation conversion from PtrHead to Head in Recursive type ( #305 ) + +# v0.8.0 - 2021/12/02 + +* Fix embedded field conflict behavior ( #300 ) +* Refactor compiler for encoder ( #301 #302 ) + +# v0.7.10 - 2021/10/16 + +* Fix conversion from pointer to uint64 ( #294 ) + +# v0.7.9 - 2021/09/28 + +* Fix encoding of nil value about interface type that has method ( #291 ) + +# v0.7.8 - 2021/09/01 + +* Fix mapassign_faststr for indirect struct type ( #283 ) +* Fix encoding of not empty interface type ( #284 ) +* Fix encoding of empty struct interface type ( #286 ) + +# v0.7.7 - 2021/08/25 + +* Fix invalid utf8 on stream decoder ( #279 ) +* Fix buffer length bug on string stream decoder ( #280 ) + +Thank you @orisano !! + +# v0.7.6 - 2021/08/13 + +* Fix nil slice assignment ( #276 ) +* Improve error message ( #277 ) + +# v0.7.5 - 2021/08/12 + +* Fix encoding of embedded struct with tags ( #265 ) +* Fix encoding of embedded struct that isn't first field ( #272 ) +* Fix decoding of binary type with escaped char ( #273 ) + +# v0.7.4 - 2021/07/06 + +* Fix encoding of indirect layout structure ( #264 ) + +# v0.7.3 - 2021/06/29 + +* Fix encoding of pointer type in empty interface ( #262 ) + +# v0.7.2 - 2021/06/26 + +### Fix decoder + +* Add decoder for func type to fix decoding of nil function value ( #257 ) +* Fix stream decoding of []byte type ( #258 ) + +### Performance + +* Improve decoding performance of map[string]interface{} type ( use `mapassign_faststr` ) ( #256 ) +* Improve encoding performance of empty interface type ( remove recursive calling of `vm.Run` ) ( #259 ) + +### Benchmark + +* Add bytedance/sonic as benchmark target ( #254 ) + +# v0.7.1 - 2021/06/18 + +### Fix decoder + +* Fix error when unmarshal empty array ( #253 ) + +# v0.7.0 - 2021/06/12 + +### Support context for MarshalJSON and UnmarshalJSON ( #248 ) + +* json.MarshalContext(context.Context, interface{}, ...json.EncodeOption) ([]byte, error) +* json.NewEncoder(io.Writer).EncodeContext(context.Context, interface{}, ...json.EncodeOption) error +* json.UnmarshalContext(context.Context, []byte, interface{}, ...json.DecodeOption) error +* json.NewDecoder(io.Reader).DecodeContext(context.Context, interface{}) error + +```go +type MarshalerContext interface { + MarshalJSON(context.Context) ([]byte, error) +} + +type UnmarshalerContext interface { + UnmarshalJSON(context.Context, []byte) error +} +``` + +### Add DecodeFieldPriorityFirstWin option ( #242 ) + +In the default behavior, go-json, like encoding/json, will reflect the result of the last evaluation when a field with the same name exists. I've added new options to allow you to change this behavior. `json.DecodeFieldPriorityFirstWin` option reflects the result of the first evaluation if a field with the same name exists. This behavior has a performance advantage as it allows the subsequent strings to be skipped if all fields have been evaluated. 
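+
+A hedged usage sketch (not part of the original release notes): it assumes the package's `UnmarshalWithOption` and `DecodeFieldPriorityFirstWin` helpers and shows the difference on input that contains a duplicated key.
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/goccy/go-json"
+)
+
+func main() {
+	var v struct {
+		Name string `json:"name"`
+	}
+	data := []byte(`{"name":"first","name":"second"}`)
+
+	// Default behavior (same as encoding/json): the last duplicate key wins.
+	_ = json.Unmarshal(data, &v)
+	fmt.Println(v.Name) // second
+
+	// With DecodeFieldPriorityFirstWin the first occurrence wins and the
+	// remaining duplicates can be skipped once every field has been seen.
+	_ = json.UnmarshalWithOption(data, &v, json.DecodeFieldPriorityFirstWin())
+	fmt.Println(v.Name) // first
+}
+```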
+ +### Fix encoder + +* Fix indent number contains recursive type ( #249 ) +* Fix encoding of using empty interface as map key ( #244 ) + +### Fix decoder + +* Fix decoding fields containing escaped characters ( #237 ) + +### Refactor + +* Move some tests to subdirectory ( #243 ) +* Refactor package layout for decoder ( #238 ) + +# v0.6.1 - 2021/06/02 + +### Fix encoder + +* Fix value of totalLength for encoding ( #236 ) + +# v0.6.0 - 2021/06/01 + +### Support Colorize option for encoding (#233) + +```go +b, err := json.MarshalWithOption(v, json.Colorize(json.DefaultColorScheme)) +if err != nil { + ... +} +fmt.Println(string(b)) // print colored json +``` + +### Refactor + +* Fix opcode layout - Adjust memory layout of the opcode to 128 bytes in a 64-bit environment ( #230 ) +* Refactor encode option ( #231 ) +* Refactor escape string ( #232 ) + +# v0.5.1 - 2021/5/20 + +### Optimization + +* Add type addrShift to enable bigger encoder/decoder cache ( #213 ) + +### Fix decoder + +* Keep original reference of slice element ( #229 ) + +### Refactor + +* Refactor Debug mode for encoding ( #226 ) +* Generate VM sources for encoding ( #227 ) +* Refactor validator for null/true/false for decoding ( #221 ) + +# v0.5.0 - 2021/5/9 + +### Supports using omitempty and string tags at the same time ( #216 ) + +### Fix decoder + +* Fix stream decoder for unicode char ( #215 ) +* Fix decoding of slice element ( #219 ) +* Fix calculating of buffer length for stream decoder ( #220 ) + +### Refactor + +* replace skipWhiteSpace goto by loop ( #212 ) + +# v0.4.14 - 2021/5/4 + +### Benchmark + +* Add valyala/fastjson to benchmark ( #193 ) +* Add benchmark task for CI ( #211 ) + +### Fix decoder + +* Fix decoding of slice with unmarshal json type ( #198 ) +* Fix decoding of null value for interface type that does not implement Unmarshaler ( #205 ) +* Fix decoding of null value to []byte by json.Unmarshal ( #206 ) +* Fix decoding of backslash char at the end of string ( #207 ) +* Fix stream decoder for null/true/false value ( #208 ) +* Fix stream decoder for slow reader ( #211 ) + +### Performance + +* If cap of slice is enough, reuse slice data for compatibility with encoding/json ( #200 ) + +# v0.4.13 - 2021/4/20 + +### Fix json.Compact and json.Indent + +* Support validation the input buffer for json.Compact and json.Indent ( #189 ) +* Optimize json.Compact and json.Indent ( improve memory footprint ) ( #190 ) + +# v0.4.12 - 2021/4/15 + +### Fix encoder + +* Fix unnecessary indent for empty slice type ( #181 ) +* Fix encoding of omitempty feature for the slice or interface type ( #183 ) +* Fix encoding custom types zero values with omitempty when marshaller exists ( #187 ) + +### Fix decoder + +* Fix decoder for invalid top level value ( #184 ) +* Fix decoder for invalid number value ( #185 ) + +# v0.4.11 - 2021/4/3 + +* Improve decoder performance for interface type + +# v0.4.10 - 2021/4/2 + +### Fix encoder + +* Fixed a bug when encoding slice and map containing recursive structures +* Fixed a logic to determine if indirect reference + +# v0.4.9 - 2021/3/29 + +### Add debug mode + +If you use `json.MarshalWithOption(v, json.Debug())` and `panic` occurred in `go-json`, produces debug information to console. + +### Support a new feature to compatible with encoding/json + +- invalid UTF-8 is coerced to valid UTF-8 ( without performance down ) + +### Fix encoder + +- Fixed handling of MarshalJSON of function type + +### Fix decoding of slice of pointer type + +If there is a pointer value, go-json will use it. 
(This behavior is necessary to achieve the ability to prioritize pre-filled values). However, since slices are reused internally, there was a bug that referred to the previous pointer value. Therefore, it is not necessary to refer to the pointer value in advance for the slice element, so we explicitly initialize slice element by `nil`. + +# v0.4.8 - 2021/3/21 + +### Reduce memory usage at compile time + +* go-json have used about 2GB of memory at compile time, but now it can compile with about less than 550MB. + +### Fix any encoder's bug + +* Add many test cases for encoder +* Fix composite type ( slice/array/map ) +* Fix pointer types +* Fix encoding of MarshalJSON or MarshalText or json.Number type + +### Refactor encoder + +* Change package layout for reducing memory usage at compile +* Remove anonymous and only operation +* Remove root property from encodeCompileContext and opcode + +### Fix CI + +* Add Go 1.16 +* Remove Go 1.13 +* Fix `make cover` task + +### Number/Delim/Token/RawMessage use the types defined in encoding/json by type alias + +# v0.4.7 - 2021/02/22 + +### Fix decoder + +* Fix decoding of deep recursive structure +* Fix decoding of embedded unexported pointer field +* Fix invalid test case +* Fix decoding of invalid value +* Fix decoding of prefilled value +* Fix not being able to return UnmarshalTypeError when it should be returned +* Fix decoding of null value +* Fix decoding of type of null string +* Use pre allocated pointer if exists it at decoding + +### Reduce memory usage at compile + +* Integrate int/int8/int16/int32/int64 and uint/uint8/uint16/uint32/uint64 operation to reduce memory usage at compile + +### Remove unnecessary optype diff --git a/vendor/github.com/goccy/go-json/LICENSE b/vendor/github.com/goccy/go-json/LICENSE new file mode 100644 index 0000000000000..6449c8bff65e3 --- /dev/null +++ b/vendor/github.com/goccy/go-json/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 Masaaki Goshima + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/goccy/go-json/Makefile b/vendor/github.com/goccy/go-json/Makefile new file mode 100644 index 0000000000000..c030577dcf9d5 --- /dev/null +++ b/vendor/github.com/goccy/go-json/Makefile @@ -0,0 +1,39 @@ +PKG := github.com/goccy/go-json + +BIN_DIR := $(CURDIR)/bin +PKGS := $(shell go list ./... 
| grep -v internal/cmd|grep -v test) +COVER_PKGS := $(foreach pkg,$(PKGS),$(subst $(PKG),.,$(pkg))) + +COMMA := , +EMPTY := +SPACE := $(EMPTY) $(EMPTY) +COVERPKG_OPT := $(subst $(SPACE),$(COMMA),$(COVER_PKGS)) + +$(BIN_DIR): + @mkdir -p $(BIN_DIR) + +.PHONY: cover +cover: + go test -coverpkg=$(COVERPKG_OPT) -coverprofile=cover.out ./... + +.PHONY: cover-html +cover-html: cover + go tool cover -html=cover.out + +.PHONY: lint +lint: golangci-lint + $(BIN_DIR)/golangci-lint run + +golangci-lint: | $(BIN_DIR) + @{ \ + set -e; \ + GOLANGCI_LINT_TMP_DIR=$$(mktemp -d); \ + cd $$GOLANGCI_LINT_TMP_DIR; \ + go mod init tmp; \ + GOBIN=$(BIN_DIR) go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.54.2; \ + rm -rf $$GOLANGCI_LINT_TMP_DIR; \ + } + +.PHONY: generate +generate: + go generate ./internal/... diff --git a/vendor/github.com/goccy/go-json/README.md b/vendor/github.com/goccy/go-json/README.md new file mode 100644 index 0000000000000..7bacc54f9cd07 --- /dev/null +++ b/vendor/github.com/goccy/go-json/README.md @@ -0,0 +1,529 @@ +# go-json + +![Go](https://github.com/goccy/go-json/workflows/Go/badge.svg) +[![GoDoc](https://godoc.org/github.com/goccy/go-json?status.svg)](https://pkg.go.dev/github.com/goccy/go-json?tab=doc) +[![codecov](https://codecov.io/gh/goccy/go-json/branch/master/graph/badge.svg)](https://codecov.io/gh/goccy/go-json) + +Fast JSON encoder/decoder compatible with encoding/json for Go + + + +# Roadmap + +``` +* version ( expected release date ) + +* v0.9.0 + | + | while maintaining compatibility with encoding/json, we will add convenient APIs + | + v +* v1.0.0 +``` + +We are accepting requests for features that will be implemented between v0.9.0 and v.1.0.0. +If you have the API you need, please submit your issue [here](https://github.com/goccy/go-json/issues). + +# Features + +- Drop-in replacement of `encoding/json` +- Fast ( See [Benchmark section](https://github.com/goccy/go-json#benchmarks) ) +- Flexible customization with options +- Coloring the encoded string +- Can propagate context.Context to `MarshalJSON` or `UnmarshalJSON` +- Can dynamically filter the fields of the structure type-safely + +# Installation + +``` +go get github.com/goccy/go-json +``` + +# How to use + +Replace import statement from `encoding/json` to `github.com/goccy/go-json` + +``` +-import "encoding/json" ++import "github.com/goccy/go-json" +``` + +# JSON library comparison + +| name | encoder | decoder | compatible with `encoding/json` | +| :----: | :------: | :-----: | :-----------------------------: | +| encoding/json | yes | yes | N/A | +| [json-iterator/go](https://github.com/json-iterator/go) | yes | yes | partial | +| [easyjson](https://github.com/mailru/easyjson) | yes | yes | no | +| [gojay](https://github.com/francoispqt/gojay) | yes | yes | no | +| [segmentio/encoding/json](https://github.com/segmentio/encoding/tree/master/json) | yes | yes | partial | +| [jettison](https://github.com/wI2L/jettison) | yes | no | no | +| [simdjson-go](https://github.com/minio/simdjson-go) | no | yes | no | +| goccy/go-json | yes | yes | yes | + +- `json-iterator/go` isn't compatible with `encoding/json` in many ways (e.g. https://github.com/json-iterator/go/issues/229 ), but it hasn't been supported for a long time. +- `segmentio/encoding/json` is well supported for encoders, but some are not supported for decoder APIs such as `Token` ( streaming decode ) + +## Other libraries + +- [jingo](https://github.com/bet365/jingo) + +I tried the benchmark but it didn't work. 
+Also, it seems to panic when it receives an unexpected value because there is no error handling... + +- [ffjson](https://github.com/pquerna/ffjson) + +Benchmarking gave very slow results. +It seems that it is assumed that the user will use the buffer pool properly. +Also, development seems to have already stopped + +# Benchmarks + +``` +$ cd benchmarks +$ go test -bench . +``` + +## Encode + + + + +## Decode + + + + + + +# Fuzzing + +[go-json-fuzz](https://github.com/goccy/go-json-fuzz) is the repository for fuzzing tests. +If you run the test in this repository and find a bug, please commit to corpus to go-json-fuzz and report the issue to [go-json](https://github.com/goccy/go-json/issues). + +# How it works + +`go-json` is very fast in both encoding and decoding compared to other libraries. +It's easier to implement by using automatic code generation for performance or by using a dedicated interface, but `go-json` dares to stick to compatibility with `encoding/json` and is the simple interface. Despite this, we are developing with the aim of being the fastest library. + +Here, we explain the various speed-up techniques implemented by `go-json`. + +## Basic technique + +The techniques listed here are the ones used by most of the libraries listed above. + +### Buffer reuse + +Since the only value required for the result of `json.Marshal(interface{}) ([]byte, error)` is `[]byte`, the only value that must be allocated during encoding is the return value `[]byte` . + +Also, as the number of allocations increases, the performance will be affected, so the number of allocations should be kept as low as possible when creating `[]byte`. + +Therefore, there is a technique to reduce the number of times a new buffer must be allocated by reusing the buffer used for the previous encoding by using `sync.Pool`. + +Finally, you allocate a buffer that is as long as the resulting buffer and copy the contents into it, you only need to allocate the buffer once in theory. + +```go +type buffer struct { + data []byte +} + +var bufPool = sync.Pool{ + New: func() interface{} { + return &buffer{data: make([]byte, 0, 1024)} + }, +} + +buf := bufPool.Get().(*buffer) +data := encode(buf.data) // reuse buf.data + +newBuf := make([]byte, len(data)) +copy(newBuf, buf) + +buf.data = data +bufPool.Put(buf) +``` + +### Elimination of reflection + +As you know, the reflection operation is very slow. + +Therefore, using the fact that the address position where the type information is stored is fixed for each binary ( we call this `typeptr` ), +we can use the address in the type information to call a pre-built optimized process. + +For example, you can get the address to the type information from `interface{}` as follows and you can use that information to call a process that does not have reflection. + +To process without reflection, pass a pointer (`unsafe.Pointer`) to the value is stored. + +```go + +type emptyInterface struct { + typ unsafe.Pointer + ptr unsafe.Pointer +} + +var typeToEncoder = map[uintptr]func(unsafe.Pointer)([]byte, error){} + +func Marshal(v interface{}) ([]byte, error) { + iface := (*emptyInterface)(unsafe.Pointer(&v) + typeptr := uintptr(iface.typ) + if enc, exists := typeToEncoder[typeptr]; exists { + return enc(iface.ptr) + } + ... +} +``` + +※ In reality, `typeToEncoder` can be referenced by multiple goroutines, so exclusive control is required. 
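+
+For reference, the sketch above can be fleshed out into the following self-contained example (a rough illustration only; `marshalByTypePtr` and `encodeInt` are hypothetical names, and the map would need exclusive control in real code, as noted above):
+
+```go
+package main
+
+import (
+	"fmt"
+	"unsafe"
+)
+
+type emptyInterface struct {
+	typ unsafe.Pointer
+	ptr unsafe.Pointer
+}
+
+// typeToEncoder maps a type's address (typeptr) to a pre-built encoder that
+// works directly on the data pointer, so the hot path needs no reflection.
+var typeToEncoder = map[uintptr]func(unsafe.Pointer) ([]byte, error){}
+
+func encodeInt(p unsafe.Pointer) ([]byte, error) {
+	return []byte(fmt.Sprint(*(*int)(p))), nil
+}
+
+func marshalByTypePtr(v interface{}) ([]byte, error) {
+	iface := (*emptyInterface)(unsafe.Pointer(&v))
+	typeptr := uintptr(iface.typ)
+	if enc, exists := typeToEncoder[typeptr]; exists {
+		return enc(iface.ptr)
+	}
+	return nil, fmt.Errorf("no encoder registered for typeptr %x", typeptr)
+}
+
+func main() {
+	// Register the int encoder once by capturing the typeptr of an int value.
+	var probe interface{} = 0
+	typeToEncoder[uintptr((*emptyInterface)(unsafe.Pointer(&probe)).typ)] = encodeInt
+
+	b, err := marshalByTypePtr(1234)
+	fmt.Println(string(b), err) // 1234 <nil>
+}
+```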
+ +## Unique speed-up technique + +## Encoder + +### Do not escape arguments of `Marshal` + +`json.Marshal` and `json.Unmarshal` receive `interface{}` value and they perform type determination dynamically to process. +In normal case, you need to use the `reflect` library to determine the type dynamically, but since `reflect.Type` is defined as `interface`, when you call the method of `reflect.Type`, The reflect's argument is escaped. + +Therefore, the arguments for `Marshal` and `Unmarshal` are always escaped to the heap. +However, `go-json` can use the feature of `reflect.Type` while avoiding escaping. + +`reflect.Type` is defined as `interface`, but in reality `reflect.Type` is implemented only by the structure `rtype` defined in the `reflect` package. +For this reason, to date `reflect.Type` is the same as `*reflect.rtype`. + +Therefore, by directly handling `*reflect.rtype`, which is an implementation of `reflect.Type`, it is possible to avoid escaping because it changes from `interface` to using `struct`. + +The technique for working with `*reflect.rtype` directly from `go-json` is implemented at [rtype.go](https://github.com/goccy/go-json/blob/master/internal/runtime/rtype.go) + +Also, the same technique is cut out as a library ( https://github.com/goccy/go-reflect ) + +Initially this feature was the default behavior of `go-json`. +But after careful testing, I found that I passed a large value to `json.Marshal()` and if the argument could not be assigned to the stack, it could not be properly escaped to the heap (a bug in the Go compiler). + +Therefore, this feature will be provided as an **optional** until this issue is resolved. + +To use it, add `NoEscape` like `MarshalNoEscape()` + +### Encoding using opcode sequence + +I explained that you can use `typeptr` to call a pre-built process from type information. + +In other libraries, this dedicated process is processed by making it an function calling like anonymous function, but function calls are inherently slow processes and should be avoided as much as possible. + +Therefore, `go-json` adopted the Instruction-based execution processing system, which is also used to implement virtual machines for programming language. + +If it is the first type to encode, create the opcode ( instruction ) sequence required for encoding. +From the second time onward, use `typeptr` to get the cached pre-built opcode sequence and encode it based on it. An example of the opcode sequence is shown below. + +```go +json.Marshal(struct{ + X int `json:"x"` + Y string `json:"y"` +}{X: 1, Y: "hello"}) +``` + +When encoding a structure like the one above, create a sequence of opcodes like this: + +``` +- opStructFieldHead ( `{` ) +- opStructFieldInt ( `"x": 1,` ) +- opStructFieldString ( `"y": "hello"` ) +- opStructEnd ( `}` ) +- opEnd +``` + +※ When processing each operation, write the letters on the right. + +In addition, each opcode is managed by the following structure ( +Pseudo code ). + +```go +type opType int +const ( + opStructFieldHead opType = iota + opStructFieldInt + opStructFieldStirng + opStructEnd + opEnd +) +type opcode struct { + op opType + key []byte + next *opcode +} +``` + +The process of encoding using the opcode sequence is roughly implemented as follows. + +```go +func encode(code *opcode, b []byte, p unsafe.Pointer) ([]byte, error) { + for { + switch code.op { + case opStructFieldHead: + b = append(b, '{') + code = code.next + case opStructFieldInt: + b = append(b, code.key...) 
+ b = appendInt((*int)(unsafe.Pointer(uintptr(p)+code.offset))) + code = code.next + case opStructFieldString: + b = append(b, code.key...) + b = appendString((*string)(unsafe.Pointer(uintptr(p)+code.offset))) + code = code.next + case opStructEnd: + b = append(b, '}') + code = code.next + case opEnd: + goto END + } + } +END: + return b, nil +} +``` + +In this way, the huge `switch-case` is used to encode by manipulating the linked list opcodes to avoid unnecessary function calls. + +### Opcode sequence optimization + +One of the advantages of encoding using the opcode sequence is the ease of optimization. +The opcode sequence mentioned above is actually converted into the following optimized operations and used. + +``` +- opStructFieldHeadInt ( `{"x": 1,` ) +- opStructEndString ( `"y": "hello"}` ) +- opEnd +``` + +It has been reduced from 5 opcodes to 3 opcodes ! +Reducing the number of opcodees means reducing the number of branches with `switch-case`. +In other words, the closer the number of operations is to 1, the faster the processing can be performed. + +In `go-json`, optimization to reduce the number of opcodes itself like the above and it speeds up by preparing opcodes with optimized paths. + +### Change recursive call from CALL to JMP + +Recursive processing is required during encoding if the type is defined recursively as follows: + +```go +type T struct { + X int + U *U +} + +type U struct { + T *T +} + +b, err := json.Marshal(&T{ + X: 1, + U: &U{ + T: &T{ + X: 2, + }, + }, +}) +fmt.Println(string(b)) // {"X":1,"U":{"T":{"X":2,"U":null}}} +``` + +In `go-json`, recursive processing is processed by the operation type of ` opStructFieldRecursive`. + +In this operation, after acquiring the opcode sequence used for recursive processing, the function is **not** called recursively as it is, but the necessary values ​​are saved by itself and implemented by moving to the next operation. + +The technique of implementing recursive processing with the `JMP` operation while avoiding the `CALL` operation is a famous technique for implementing a high-speed virtual machine. + +For more details, please refer to [the article](https://engineering.mercari.com/blog/entry/1599563768-081104c850) ( but Japanese only ). + +### Dispatch by typeptr from map to slice + +When retrieving the data cached from the type information by `typeptr`, we usually use map. +Map requires exclusive control, so use `sync.Map` for a naive implementation. + +However, this is slow, so it's a good idea to use the `atomic` package for exclusive control as implemented by `segmentio/encoding/json` ( https://github.com/segmentio/encoding/blob/master/json/codec.go#L41-L55 ). + +This implementation slows down the set instead of speeding up the get, but it works well because of the nature of the library, it encodes much more for the same type. + +However, as a result of profiling, I noticed that `runtime.mapaccess2` accounts for a significant percentage of the execution time. So I thought if I could change the lookup from map to slice. + +There is an API named `typelinks` defined in the `runtime` package that the `reflect` package uses internally. +This allows you to get all the type information defined in the binary at runtime. + +The fact that all type information can be acquired means that by constructing slices in advance with the acquired total number of type information, it is possible to look up with the value of `typeptr` without worrying about out-of-range access. 
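+
+A rough sketch of the idea, under illustrative assumptions (the variable names, the fixed base address, and the shift-based index are ours, not the library's exact implementation):
+
+```go
+package main
+
+import "fmt"
+
+// opcodeSet stands in for the pre-built encoder program cached per type.
+type opcodeSet struct{ name string }
+
+// In a real implementation these would be derived from the type information
+// embedded in the binary (e.g. the typelinks table); fixed here for brevity.
+var (
+	baseTypeAddr uintptr = 0x1000
+	addrShift    uintptr = 3 // type descriptors are at least 8-byte aligned
+	codeCache            = make([]*opcodeSet, 1<<10)
+)
+
+// cachedOpcodeSet resolves the cache entry with one subtraction, one shift and
+// one bounds-checked slice index: no map lookup and no locking on reads.
+func cachedOpcodeSet(typeptr uintptr) *opcodeSet {
+	idx := (typeptr - baseTypeAddr) >> addrShift
+	if idx < uintptr(len(codeCache)) {
+		return codeCache[idx]
+	}
+	return nil
+}
+
+func main() {
+	typeptr := uintptr(0x1040) // pretend this is some type's address
+	codeCache[(typeptr-baseTypeAddr)>>addrShift] = &opcodeSet{name: "int encoder"}
+	fmt.Println(cachedOpcodeSet(typeptr).name) // int encoder
+}
+```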
+ +However, if there is too much type information, it will use a lot of memory, so by default we will only use this optimization if the slice size fits within **2Mib** . + +If this approach is not available, it will fall back to the `atomic` based process described above. + +If you want to know more, please refer to the implementation [here](https://github.com/goccy/go-json/blob/master/internal/runtime/type.go#L36-L100) + +## Decoder + +### Dispatch by typeptr from map to slice + +Like the encoder, the decoder also uses typeptr to call the dedicated process. + +### Faster termination character inspection using NUL character + +In order to decode, you have to traverse the input buffer character by position. +At that time, if you check whether the buffer has reached the end, it will be very slow. + +`buf` : `[]byte` type variable. holds the string passed to the decoder +`cursor` : `int64` type variable. holds the current read position + +```go +buflen := len(buf) +for ; cursor < buflen; cursor++ { // compare cursor and buflen at all times, it is so slow. + switch buf[cursor] { + case ' ', '\n', '\r', '\t': + } +} +``` + +Therefore, by adding the `NUL` (`\000`) character to the end of the read buffer as shown below, it is possible to check the termination character at the same time as other characters. + +```go +for { + switch buf[cursor] { + case ' ', '\n', '\r', '\t': + case '\000': + return nil + } + cursor++ +} +``` + +### Use Boundary Check Elimination + +Due to the `NUL` character optimization, the Go compiler does a boundary check every time, even though `buf[cursor]` does not cause out-of-range access. + +Therefore, `go-json` eliminates boundary check by fetching characters for hotspot by pointer operation. For example, the following code. + +```go +func char(ptr unsafe.Pointer, offset int64) byte { + return *(*byte)(unsafe.Pointer(uintptr(ptr) + uintptr(offset))) +} + +p := (*sliceHeader)(&unsafe.Pointer(buf)).data +for { + switch char(p, cursor) { + case ' ', '\n', '\r', '\t': + case '\000': + return nil + } + cursor++ +} +``` + +### Checking the existence of fields of struct using Bitmaps + +I found by the profiling result, in the struct decode, lookup process for field was taking a long time. + +For example, consider decoding a string like `{"a":1,"b":2,"c":3}` into the following structure: + +```go +type T struct { + A int `json:"a"` + B int `json:"b"` + C int `json:"c"` +} +``` + +At this time, it was found that it takes a lot of time to acquire the decoding process corresponding to the field from the field name as shown below during the decoding process. + +```go +fieldName := decodeKey(buf, cursor) // "a" or "b" or "c" +decoder, exists := fieldToDecoderMap[fieldName] // so slow +if exists { + decoder(buf, cursor) +} else { + skipValue(buf, cursor) +} +``` + +To improve this process, `json-iterator/go` is optimized so that it can be branched by switch-case when the number of fields in the structure is 10 or less (switch-case is faster than map). However, there is a risk of hash collision because the value hashed by the FNV algorithm is used for conditional branching. Also, `gojay` processes this part at high speed by letting the library user yourself write `switch-case`. + + +`go-json` considers and implements a new approach that is different from these. I call this **bitmap field optimization**. + +The range of values ​​per character can be represented by `[256]byte`. 
Also, if the number of fields in the structure is 8 or less, `int8` type can represent the state of each field. +In other words, it has the following structure. + +- Base ( 8bit ): `00000000` +- Key "a": `00000001` ( assign key "a" to the first bit ) +- Key "b": `00000010` ( assign key "b" to the second bit ) +- Key "c": `00000100` ( assign key "c" to the third bit ) + +Bitmap structure is the following + +``` + | key index(0) | +------------------------ + 0 | 00000000 | + 1 | 00000000 | +~~ | | +97 (a) | 00000001 | +98 (b) | 00000010 | +99 (c) | 00000100 | +~~ | | +255 | 00000000 | +``` + +You can think of this as a Bitmap with a height of `256` and a width of the maximum string length in the field name. +In other words, it can be represented by the following type . + +```go +[maxFieldKeyLength][256]int8 +``` + +When decoding a field character, check whether the corresponding character exists by referring to the pre-built bitmap like the following. + +```go +var curBit int8 = math.MaxInt8 // 11111111 + +c := char(buf, cursor) +bit := bitmap[keyIdx][c] +curBit &= bit +if curBit == 0 { + // not found field +} +``` + +If `curBit` is not `0` until the end of the field string, then the string is +You may have hit one of the fields. +But the possibility is that if the decoded string is shorter than the field string, you will get a false hit. + +- input: `{"a":1}` +```go +type T struct { + X int `json:"abc"` +} +``` +※ Since `a` is shorter than `abc`, it can decode to the end of the field character without `curBit` being 0. + +Rest assured. In this case, it doesn't matter because you can tell if you hit by comparing the string length of `a` with the string length of `abc`. + +Finally, calculate the position of the bit where `1` is set and get the corresponding value, and you're done. + +Using this technique, field lookups are possible with only bitwise operations and access to slices. + +`go-json` uses a similar technique for fields with 9 or more and 16 or less fields. At this time, Bitmap is constructed as `[maxKeyLen][256]int16` type. + +Currently, this optimization is not performed when the maximum length of the field name is long (specifically, 64 bytes or more) in addition to the limitation of the number of fields from the viewpoint of saving memory usage. + +### Others + +I have done a lot of other optimizations. I will find time to write about them. If you have any questions about what's written here or other optimizations, please visit the `#go-json` channel on `gophers.slack.com` . + +## Reference + +Regarding the story of go-json, there are the following articles in Japanese only. + +- https://speakerdeck.com/goccy/zui-su-falsejsonraiburariwoqiu-mete +- https://engineering.mercari.com/blog/entry/1599563768-081104c850/ + +# Looking for Sponsors + +I'm looking for sponsors this library. This library is being developed as a personal project in my spare time. If you want a quick response or problem resolution when using this library in your project, please register as a [sponsor](https://github.com/sponsors/goccy). I will cooperate as much as possible. Of course, this library is developed as an MIT license, so you can use it freely for free. 
+ +# License + +MIT diff --git a/vendor/github.com/goccy/go-json/color.go b/vendor/github.com/goccy/go-json/color.go new file mode 100644 index 0000000000000..e80b22b4869a1 --- /dev/null +++ b/vendor/github.com/goccy/go-json/color.go @@ -0,0 +1,68 @@ +package json + +import ( + "fmt" + + "github.com/goccy/go-json/internal/encoder" +) + +type ( + ColorFormat = encoder.ColorFormat + ColorScheme = encoder.ColorScheme +) + +const escape = "\x1b" + +type colorAttr int + +//nolint:deadcode,varcheck +const ( + fgBlackColor colorAttr = iota + 30 + fgRedColor + fgGreenColor + fgYellowColor + fgBlueColor + fgMagentaColor + fgCyanColor + fgWhiteColor +) + +//nolint:deadcode,varcheck +const ( + fgHiBlackColor colorAttr = iota + 90 + fgHiRedColor + fgHiGreenColor + fgHiYellowColor + fgHiBlueColor + fgHiMagentaColor + fgHiCyanColor + fgHiWhiteColor +) + +func createColorFormat(attr colorAttr) ColorFormat { + return ColorFormat{ + Header: wrapColor(attr), + Footer: resetColor(), + } +} + +func wrapColor(attr colorAttr) string { + return fmt.Sprintf("%s[%dm", escape, attr) +} + +func resetColor() string { + return wrapColor(colorAttr(0)) +} + +var ( + DefaultColorScheme = &ColorScheme{ + Int: createColorFormat(fgHiMagentaColor), + Uint: createColorFormat(fgHiMagentaColor), + Float: createColorFormat(fgHiMagentaColor), + Bool: createColorFormat(fgHiYellowColor), + String: createColorFormat(fgHiGreenColor), + Binary: createColorFormat(fgHiRedColor), + ObjectKey: createColorFormat(fgHiCyanColor), + Null: createColorFormat(fgBlueColor), + } +) diff --git a/vendor/github.com/goccy/go-json/decode.go b/vendor/github.com/goccy/go-json/decode.go new file mode 100644 index 0000000000000..74c6ac3bcad75 --- /dev/null +++ b/vendor/github.com/goccy/go-json/decode.go @@ -0,0 +1,263 @@ +package json + +import ( + "context" + "fmt" + "io" + "reflect" + "unsafe" + + "github.com/goccy/go-json/internal/decoder" + "github.com/goccy/go-json/internal/errors" + "github.com/goccy/go-json/internal/runtime" +) + +type Decoder struct { + s *decoder.Stream +} + +const ( + nul = '\000' +) + +type emptyInterface struct { + typ *runtime.Type + ptr unsafe.Pointer +} + +func unmarshal(data []byte, v interface{}, optFuncs ...DecodeOptionFunc) error { + src := make([]byte, len(data)+1) // append nul byte to the end + copy(src, data) + + header := (*emptyInterface)(unsafe.Pointer(&v)) + + if err := validateType(header.typ, uintptr(header.ptr)); err != nil { + return err + } + dec, err := decoder.CompileToGetDecoder(header.typ) + if err != nil { + return err + } + ctx := decoder.TakeRuntimeContext() + ctx.Buf = src + ctx.Option.Flags = 0 + for _, optFunc := range optFuncs { + optFunc(ctx.Option) + } + cursor, err := dec.Decode(ctx, 0, 0, header.ptr) + if err != nil { + decoder.ReleaseRuntimeContext(ctx) + return err + } + decoder.ReleaseRuntimeContext(ctx) + return validateEndBuf(src, cursor) +} + +func unmarshalContext(ctx context.Context, data []byte, v interface{}, optFuncs ...DecodeOptionFunc) error { + src := make([]byte, len(data)+1) // append nul byte to the end + copy(src, data) + + header := (*emptyInterface)(unsafe.Pointer(&v)) + + if err := validateType(header.typ, uintptr(header.ptr)); err != nil { + return err + } + dec, err := decoder.CompileToGetDecoder(header.typ) + if err != nil { + return err + } + rctx := decoder.TakeRuntimeContext() + rctx.Buf = src + rctx.Option.Flags = 0 + rctx.Option.Flags |= decoder.ContextOption + rctx.Option.Context = ctx + for _, optFunc := range optFuncs { + optFunc(rctx.Option) + } + cursor, err 
:= dec.Decode(rctx, 0, 0, header.ptr) + if err != nil { + decoder.ReleaseRuntimeContext(rctx) + return err + } + decoder.ReleaseRuntimeContext(rctx) + return validateEndBuf(src, cursor) +} + +var ( + pathDecoder = decoder.NewPathDecoder() +) + +func extractFromPath(path *Path, data []byte, optFuncs ...DecodeOptionFunc) ([][]byte, error) { + if path.path.RootSelectorOnly { + return [][]byte{data}, nil + } + src := make([]byte, len(data)+1) // append nul byte to the end + copy(src, data) + + ctx := decoder.TakeRuntimeContext() + ctx.Buf = src + ctx.Option.Flags = 0 + ctx.Option.Flags |= decoder.PathOption + ctx.Option.Path = path.path + for _, optFunc := range optFuncs { + optFunc(ctx.Option) + } + paths, cursor, err := pathDecoder.DecodePath(ctx, 0, 0) + if err != nil { + decoder.ReleaseRuntimeContext(ctx) + return nil, err + } + decoder.ReleaseRuntimeContext(ctx) + if err := validateEndBuf(src, cursor); err != nil { + return nil, err + } + return paths, nil +} + +func unmarshalNoEscape(data []byte, v interface{}, optFuncs ...DecodeOptionFunc) error { + src := make([]byte, len(data)+1) // append nul byte to the end + copy(src, data) + + header := (*emptyInterface)(unsafe.Pointer(&v)) + + if err := validateType(header.typ, uintptr(header.ptr)); err != nil { + return err + } + dec, err := decoder.CompileToGetDecoder(header.typ) + if err != nil { + return err + } + + ctx := decoder.TakeRuntimeContext() + ctx.Buf = src + ctx.Option.Flags = 0 + for _, optFunc := range optFuncs { + optFunc(ctx.Option) + } + cursor, err := dec.Decode(ctx, 0, 0, noescape(header.ptr)) + if err != nil { + decoder.ReleaseRuntimeContext(ctx) + return err + } + decoder.ReleaseRuntimeContext(ctx) + return validateEndBuf(src, cursor) +} + +func validateEndBuf(src []byte, cursor int64) error { + for { + switch src[cursor] { + case ' ', '\t', '\n', '\r': + cursor++ + continue + case nul: + return nil + } + return errors.ErrSyntax( + fmt.Sprintf("invalid character '%c' after top-level value", src[cursor]), + cursor+1, + ) + } +} + +//nolint:staticcheck +//go:nosplit +func noescape(p unsafe.Pointer) unsafe.Pointer { + x := uintptr(p) + return unsafe.Pointer(x ^ 0) +} + +func validateType(typ *runtime.Type, p uintptr) error { + if typ == nil || typ.Kind() != reflect.Ptr || p == 0 { + return &InvalidUnmarshalError{Type: runtime.RType2Type(typ)} + } + return nil +} + +// NewDecoder returns a new decoder that reads from r. +// +// The decoder introduces its own buffering and may +// read data from r beyond the JSON values requested. +func NewDecoder(r io.Reader) *Decoder { + s := decoder.NewStream(r) + return &Decoder{ + s: s, + } +} + +// Buffered returns a reader of the data remaining in the Decoder's +// buffer. The reader is valid until the next call to Decode. +func (d *Decoder) Buffered() io.Reader { + return d.s.Buffered() +} + +// Decode reads the next JSON-encoded value from its +// input and stores it in the value pointed to by v. +// +// See the documentation for Unmarshal for details about +// the conversion of JSON into a Go value. +func (d *Decoder) Decode(v interface{}) error { + return d.DecodeWithOption(v) +} + +// DecodeContext reads the next JSON-encoded value from its +// input and stores it in the value pointed to by v with context.Context. 
+func (d *Decoder) DecodeContext(ctx context.Context, v interface{}) error { + d.s.Option.Flags |= decoder.ContextOption + d.s.Option.Context = ctx + return d.DecodeWithOption(v) +} + +func (d *Decoder) DecodeWithOption(v interface{}, optFuncs ...DecodeOptionFunc) error { + header := (*emptyInterface)(unsafe.Pointer(&v)) + typ := header.typ + ptr := uintptr(header.ptr) + typeptr := uintptr(unsafe.Pointer(typ)) + // noescape trick for header.typ ( reflect.*rtype ) + copiedType := *(**runtime.Type)(unsafe.Pointer(&typeptr)) + + if err := validateType(copiedType, ptr); err != nil { + return err + } + + dec, err := decoder.CompileToGetDecoder(typ) + if err != nil { + return err + } + if err := d.s.PrepareForDecode(); err != nil { + return err + } + s := d.s + for _, optFunc := range optFuncs { + optFunc(s.Option) + } + if err := dec.DecodeStream(s, 0, header.ptr); err != nil { + return err + } + s.Reset() + return nil +} + +func (d *Decoder) More() bool { + return d.s.More() +} + +func (d *Decoder) Token() (Token, error) { + return d.s.Token() +} + +// DisallowUnknownFields causes the Decoder to return an error when the destination +// is a struct and the input contains object keys which do not match any +// non-ignored, exported fields in the destination. +func (d *Decoder) DisallowUnknownFields() { + d.s.DisallowUnknownFields = true +} + +func (d *Decoder) InputOffset() int64 { + return d.s.TotalOffset() +} + +// UseNumber causes the Decoder to unmarshal a number into an interface{} as a +// Number instead of as a float64. +func (d *Decoder) UseNumber() { + d.s.UseNumber = true +} diff --git a/vendor/github.com/goccy/go-json/docker-compose.yml b/vendor/github.com/goccy/go-json/docker-compose.yml new file mode 100644 index 0000000000000..db40c79ad5da3 --- /dev/null +++ b/vendor/github.com/goccy/go-json/docker-compose.yml @@ -0,0 +1,13 @@ +version: '2' +services: + go-json: + image: golang:1.18 + volumes: + - '.:/go/src/go-json' + deploy: + resources: + limits: + memory: 620M + working_dir: /go/src/go-json + command: | + sh -c "go test -c . && ls go-json.test" diff --git a/vendor/github.com/goccy/go-json/encode.go b/vendor/github.com/goccy/go-json/encode.go new file mode 100644 index 0000000000000..c5173825a95aa --- /dev/null +++ b/vendor/github.com/goccy/go-json/encode.go @@ -0,0 +1,326 @@ +package json + +import ( + "context" + "io" + "os" + "unsafe" + + "github.com/goccy/go-json/internal/encoder" + "github.com/goccy/go-json/internal/encoder/vm" + "github.com/goccy/go-json/internal/encoder/vm_color" + "github.com/goccy/go-json/internal/encoder/vm_color_indent" + "github.com/goccy/go-json/internal/encoder/vm_indent" +) + +// An Encoder writes JSON values to an output stream. +type Encoder struct { + w io.Writer + enabledIndent bool + enabledHTMLEscape bool + prefix string + indentStr string +} + +// NewEncoder returns a new encoder that writes to w. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{w: w, enabledHTMLEscape: true} +} + +// Encode writes the JSON encoding of v to the stream, followed by a newline character. +// +// See the documentation for Marshal for details about the conversion of Go values to JSON. +func (e *Encoder) Encode(v interface{}) error { + return e.EncodeWithOption(v) +} + +// EncodeWithOption call Encode with EncodeOption. +func (e *Encoder) EncodeWithOption(v interface{}, optFuncs ...EncodeOptionFunc) error { + ctx := encoder.TakeRuntimeContext() + ctx.Option.Flag = 0 + + err := e.encodeWithOption(ctx, v, optFuncs...) 
+ + encoder.ReleaseRuntimeContext(ctx) + return err +} + +// EncodeContext call Encode with context.Context and EncodeOption. +func (e *Encoder) EncodeContext(ctx context.Context, v interface{}, optFuncs ...EncodeOptionFunc) error { + rctx := encoder.TakeRuntimeContext() + rctx.Option.Flag = 0 + rctx.Option.Flag |= encoder.ContextOption + rctx.Option.Context = ctx + + err := e.encodeWithOption(rctx, v, optFuncs...) //nolint: contextcheck + + encoder.ReleaseRuntimeContext(rctx) + return err +} + +func (e *Encoder) encodeWithOption(ctx *encoder.RuntimeContext, v interface{}, optFuncs ...EncodeOptionFunc) error { + if e.enabledHTMLEscape { + ctx.Option.Flag |= encoder.HTMLEscapeOption + } + ctx.Option.Flag |= encoder.NormalizeUTF8Option + ctx.Option.DebugOut = os.Stdout + for _, optFunc := range optFuncs { + optFunc(ctx.Option) + } + var ( + buf []byte + err error + ) + if e.enabledIndent { + buf, err = encodeIndent(ctx, v, e.prefix, e.indentStr) + } else { + buf, err = encode(ctx, v) + } + if err != nil { + return err + } + if e.enabledIndent { + buf = buf[:len(buf)-2] + } else { + buf = buf[:len(buf)-1] + } + buf = append(buf, '\n') + if _, err := e.w.Write(buf); err != nil { + return err + } + return nil +} + +// SetEscapeHTML specifies whether problematic HTML characters should be escaped inside JSON quoted strings. +// The default behavior is to escape &, <, and > to \u0026, \u003c, and \u003e to avoid certain safety problems that can arise when embedding JSON in HTML. +// +// In non-HTML settings where the escaping interferes with the readability of the output, SetEscapeHTML(false) disables this behavior. +func (e *Encoder) SetEscapeHTML(on bool) { + e.enabledHTMLEscape = on +} + +// SetIndent instructs the encoder to format each subsequent encoded value as if indented by the package-level function Indent(dst, src, prefix, indent). +// Calling SetIndent("", "") disables indentation. +func (e *Encoder) SetIndent(prefix, indent string) { + if prefix == "" && indent == "" { + e.enabledIndent = false + return + } + e.prefix = prefix + e.indentStr = indent + e.enabledIndent = true +} + +func marshalContext(ctx context.Context, v interface{}, optFuncs ...EncodeOptionFunc) ([]byte, error) { + rctx := encoder.TakeRuntimeContext() + rctx.Option.Flag = 0 + rctx.Option.Flag = encoder.HTMLEscapeOption | encoder.NormalizeUTF8Option | encoder.ContextOption + rctx.Option.Context = ctx + for _, optFunc := range optFuncs { + optFunc(rctx.Option) + } + + buf, err := encode(rctx, v) //nolint: contextcheck + if err != nil { + encoder.ReleaseRuntimeContext(rctx) + return nil, err + } + + // this line exists to escape call of `runtime.makeslicecopy` . + // if use `make([]byte, len(buf)-1)` and `copy(copied, buf)`, + // dst buffer size and src buffer size are differrent. + // in this case, compiler uses `runtime.makeslicecopy`, but it is slow. + buf = buf[:len(buf)-1] + copied := make([]byte, len(buf)) + copy(copied, buf) + + encoder.ReleaseRuntimeContext(rctx) + return copied, nil +} + +func marshal(v interface{}, optFuncs ...EncodeOptionFunc) ([]byte, error) { + ctx := encoder.TakeRuntimeContext() + + ctx.Option.Flag = 0 + ctx.Option.Flag |= (encoder.HTMLEscapeOption | encoder.NormalizeUTF8Option) + for _, optFunc := range optFuncs { + optFunc(ctx.Option) + } + + buf, err := encode(ctx, v) + if err != nil { + encoder.ReleaseRuntimeContext(ctx) + return nil, err + } + + // this line exists to escape call of `runtime.makeslicecopy` . 
+ // if use `make([]byte, len(buf)-1)` and `copy(copied, buf)`, + // dst buffer size and src buffer size are differrent. + // in this case, compiler uses `runtime.makeslicecopy`, but it is slow. + buf = buf[:len(buf)-1] + copied := make([]byte, len(buf)) + copy(copied, buf) + + encoder.ReleaseRuntimeContext(ctx) + return copied, nil +} + +func marshalNoEscape(v interface{}) ([]byte, error) { + ctx := encoder.TakeRuntimeContext() + + ctx.Option.Flag = 0 + ctx.Option.Flag |= (encoder.HTMLEscapeOption | encoder.NormalizeUTF8Option) + + buf, err := encodeNoEscape(ctx, v) + if err != nil { + encoder.ReleaseRuntimeContext(ctx) + return nil, err + } + + // this line exists to escape call of `runtime.makeslicecopy` . + // if use `make([]byte, len(buf)-1)` and `copy(copied, buf)`, + // dst buffer size and src buffer size are differrent. + // in this case, compiler uses `runtime.makeslicecopy`, but it is slow. + buf = buf[:len(buf)-1] + copied := make([]byte, len(buf)) + copy(copied, buf) + + encoder.ReleaseRuntimeContext(ctx) + return copied, nil +} + +func marshalIndent(v interface{}, prefix, indent string, optFuncs ...EncodeOptionFunc) ([]byte, error) { + ctx := encoder.TakeRuntimeContext() + + ctx.Option.Flag = 0 + ctx.Option.Flag |= (encoder.HTMLEscapeOption | encoder.NormalizeUTF8Option | encoder.IndentOption) + for _, optFunc := range optFuncs { + optFunc(ctx.Option) + } + + buf, err := encodeIndent(ctx, v, prefix, indent) + if err != nil { + encoder.ReleaseRuntimeContext(ctx) + return nil, err + } + + buf = buf[:len(buf)-2] + copied := make([]byte, len(buf)) + copy(copied, buf) + + encoder.ReleaseRuntimeContext(ctx) + return copied, nil +} + +func encode(ctx *encoder.RuntimeContext, v interface{}) ([]byte, error) { + b := ctx.Buf[:0] + if v == nil { + b = encoder.AppendNull(ctx, b) + b = encoder.AppendComma(ctx, b) + return b, nil + } + header := (*emptyInterface)(unsafe.Pointer(&v)) + typ := header.typ + + typeptr := uintptr(unsafe.Pointer(typ)) + codeSet, err := encoder.CompileToGetCodeSet(ctx, typeptr) + if err != nil { + return nil, err + } + + p := uintptr(header.ptr) + ctx.Init(p, codeSet.CodeLength) + ctx.KeepRefs = append(ctx.KeepRefs, header.ptr) + + buf, err := encodeRunCode(ctx, b, codeSet) + if err != nil { + return nil, err + } + ctx.Buf = buf + return buf, nil +} + +func encodeNoEscape(ctx *encoder.RuntimeContext, v interface{}) ([]byte, error) { + b := ctx.Buf[:0] + if v == nil { + b = encoder.AppendNull(ctx, b) + b = encoder.AppendComma(ctx, b) + return b, nil + } + header := (*emptyInterface)(unsafe.Pointer(&v)) + typ := header.typ + + typeptr := uintptr(unsafe.Pointer(typ)) + codeSet, err := encoder.CompileToGetCodeSet(ctx, typeptr) + if err != nil { + return nil, err + } + + p := uintptr(header.ptr) + ctx.Init(p, codeSet.CodeLength) + buf, err := encodeRunCode(ctx, b, codeSet) + if err != nil { + return nil, err + } + + ctx.Buf = buf + return buf, nil +} + +func encodeIndent(ctx *encoder.RuntimeContext, v interface{}, prefix, indent string) ([]byte, error) { + b := ctx.Buf[:0] + if v == nil { + b = encoder.AppendNull(ctx, b) + b = encoder.AppendCommaIndent(ctx, b) + return b, nil + } + header := (*emptyInterface)(unsafe.Pointer(&v)) + typ := header.typ + + typeptr := uintptr(unsafe.Pointer(typ)) + codeSet, err := encoder.CompileToGetCodeSet(ctx, typeptr) + if err != nil { + return nil, err + } + + p := uintptr(header.ptr) + ctx.Init(p, codeSet.CodeLength) + buf, err := encodeRunIndentCode(ctx, b, codeSet, prefix, indent) + + ctx.KeepRefs = append(ctx.KeepRefs, header.ptr) 
+ + if err != nil { + return nil, err + } + + ctx.Buf = buf + return buf, nil +} + +func encodeRunCode(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet) ([]byte, error) { + if (ctx.Option.Flag & encoder.DebugOption) != 0 { + if (ctx.Option.Flag & encoder.ColorizeOption) != 0 { + return vm_color.DebugRun(ctx, b, codeSet) + } + return vm.DebugRun(ctx, b, codeSet) + } + if (ctx.Option.Flag & encoder.ColorizeOption) != 0 { + return vm_color.Run(ctx, b, codeSet) + } + return vm.Run(ctx, b, codeSet) +} + +func encodeRunIndentCode(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet, prefix, indent string) ([]byte, error) { + ctx.Prefix = []byte(prefix) + ctx.IndentStr = []byte(indent) + if (ctx.Option.Flag & encoder.DebugOption) != 0 { + if (ctx.Option.Flag & encoder.ColorizeOption) != 0 { + return vm_color_indent.DebugRun(ctx, b, codeSet) + } + return vm_indent.DebugRun(ctx, b, codeSet) + } + if (ctx.Option.Flag & encoder.ColorizeOption) != 0 { + return vm_color_indent.Run(ctx, b, codeSet) + } + return vm_indent.Run(ctx, b, codeSet) +} diff --git a/vendor/github.com/goccy/go-json/error.go b/vendor/github.com/goccy/go-json/error.go new file mode 100644 index 0000000000000..5b2dcee50ecf4 --- /dev/null +++ b/vendor/github.com/goccy/go-json/error.go @@ -0,0 +1,41 @@ +package json + +import ( + "github.com/goccy/go-json/internal/errors" +) + +// Before Go 1.2, an InvalidUTF8Error was returned by Marshal when +// attempting to encode a string value with invalid UTF-8 sequences. +// As of Go 1.2, Marshal instead coerces the string to valid UTF-8 by +// replacing invalid bytes with the Unicode replacement rune U+FFFD. +// +// Deprecated: No longer used; kept for compatibility. +type InvalidUTF8Error = errors.InvalidUTF8Error + +// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal. +// (The argument to Unmarshal must be a non-nil pointer.) +type InvalidUnmarshalError = errors.InvalidUnmarshalError + +// A MarshalerError represents an error from calling a MarshalJSON or MarshalText method. +type MarshalerError = errors.MarshalerError + +// A SyntaxError is a description of a JSON syntax error. +type SyntaxError = errors.SyntaxError + +// An UnmarshalFieldError describes a JSON object key that +// led to an unexported (and therefore unwritable) struct field. +// +// Deprecated: No longer used; kept for compatibility. +type UnmarshalFieldError = errors.UnmarshalFieldError + +// An UnmarshalTypeError describes a JSON value that was +// not appropriate for a value of a specific Go type. +type UnmarshalTypeError = errors.UnmarshalTypeError + +// An UnsupportedTypeError is returned by Marshal when attempting +// to encode an unsupported value type. 
+type UnsupportedTypeError = errors.UnsupportedTypeError + +type UnsupportedValueError = errors.UnsupportedValueError + +type PathError = errors.PathError diff --git a/vendor/github.com/goccy/go-json/internal/decoder/anonymous_field.go b/vendor/github.com/goccy/go-json/internal/decoder/anonymous_field.go new file mode 100644 index 0000000000000..b6876cf0d049c --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/anonymous_field.go @@ -0,0 +1,41 @@ +package decoder + +import ( + "unsafe" + + "github.com/goccy/go-json/internal/runtime" +) + +type anonymousFieldDecoder struct { + structType *runtime.Type + offset uintptr + dec Decoder +} + +func newAnonymousFieldDecoder(structType *runtime.Type, offset uintptr, dec Decoder) *anonymousFieldDecoder { + return &anonymousFieldDecoder{ + structType: structType, + offset: offset, + dec: dec, + } +} + +func (d *anonymousFieldDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + if *(*unsafe.Pointer)(p) == nil { + *(*unsafe.Pointer)(p) = unsafe_New(d.structType) + } + p = *(*unsafe.Pointer)(p) + return d.dec.DecodeStream(s, depth, unsafe.Pointer(uintptr(p)+d.offset)) +} + +func (d *anonymousFieldDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + if *(*unsafe.Pointer)(p) == nil { + *(*unsafe.Pointer)(p) = unsafe_New(d.structType) + } + p = *(*unsafe.Pointer)(p) + return d.dec.Decode(ctx, cursor, depth, unsafe.Pointer(uintptr(p)+d.offset)) +} + +func (d *anonymousFieldDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + return d.dec.DecodePath(ctx, cursor, depth) +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/array.go b/vendor/github.com/goccy/go-json/internal/decoder/array.go new file mode 100644 index 0000000000000..4b23ed43fe20f --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/array.go @@ -0,0 +1,176 @@ +package decoder + +import ( + "fmt" + "unsafe" + + "github.com/goccy/go-json/internal/errors" + "github.com/goccy/go-json/internal/runtime" +) + +type arrayDecoder struct { + elemType *runtime.Type + size uintptr + valueDecoder Decoder + alen int + structName string + fieldName string + zeroValue unsafe.Pointer +} + +func newArrayDecoder(dec Decoder, elemType *runtime.Type, alen int, structName, fieldName string) *arrayDecoder { + // workaround to avoid checkptr errors. cannot use `*(*unsafe.Pointer)(unsafe_New(elemType))` directly. 
+ zeroValuePtr := unsafe_New(elemType) + zeroValue := **(**unsafe.Pointer)(unsafe.Pointer(&zeroValuePtr)) + return &arrayDecoder{ + valueDecoder: dec, + elemType: elemType, + size: elemType.Size(), + alen: alen, + structName: structName, + fieldName: fieldName, + zeroValue: zeroValue, + } +} + +func (d *arrayDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + depth++ + if depth > maxDecodeNestingDepth { + return errors.ErrExceededMaxDepth(s.char(), s.cursor) + } + + for { + switch s.char() { + case ' ', '\n', '\t', '\r': + case 'n': + if err := nullBytes(s); err != nil { + return err + } + return nil + case '[': + idx := 0 + s.cursor++ + if s.skipWhiteSpace() == ']' { + for idx < d.alen { + *(*unsafe.Pointer)(unsafe.Pointer(uintptr(p) + uintptr(idx)*d.size)) = d.zeroValue + idx++ + } + s.cursor++ + return nil + } + for { + if idx < d.alen { + if err := d.valueDecoder.DecodeStream(s, depth, unsafe.Pointer(uintptr(p)+uintptr(idx)*d.size)); err != nil { + return err + } + } else { + if err := s.skipValue(depth); err != nil { + return err + } + } + idx++ + switch s.skipWhiteSpace() { + case ']': + for idx < d.alen { + *(*unsafe.Pointer)(unsafe.Pointer(uintptr(p) + uintptr(idx)*d.size)) = d.zeroValue + idx++ + } + s.cursor++ + return nil + case ',': + s.cursor++ + continue + case nul: + if s.read() { + s.cursor++ + continue + } + goto ERROR + default: + goto ERROR + } + } + case nul: + if s.read() { + continue + } + goto ERROR + default: + goto ERROR + } + s.cursor++ + } +ERROR: + return errors.ErrUnexpectedEndOfJSON("array", s.totalOffset()) +} + +func (d *arrayDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + buf := ctx.Buf + depth++ + if depth > maxDecodeNestingDepth { + return 0, errors.ErrExceededMaxDepth(buf[cursor], cursor) + } + + for { + switch buf[cursor] { + case ' ', '\n', '\t', '\r': + cursor++ + continue + case 'n': + if err := validateNull(buf, cursor); err != nil { + return 0, err + } + cursor += 4 + return cursor, nil + case '[': + idx := 0 + cursor++ + cursor = skipWhiteSpace(buf, cursor) + if buf[cursor] == ']' { + for idx < d.alen { + *(*unsafe.Pointer)(unsafe.Pointer(uintptr(p) + uintptr(idx)*d.size)) = d.zeroValue + idx++ + } + cursor++ + return cursor, nil + } + for { + if idx < d.alen { + c, err := d.valueDecoder.Decode(ctx, cursor, depth, unsafe.Pointer(uintptr(p)+uintptr(idx)*d.size)) + if err != nil { + return 0, err + } + cursor = c + } else { + c, err := skipValue(buf, cursor, depth) + if err != nil { + return 0, err + } + cursor = c + } + idx++ + cursor = skipWhiteSpace(buf, cursor) + switch buf[cursor] { + case ']': + for idx < d.alen { + *(*unsafe.Pointer)(unsafe.Pointer(uintptr(p) + uintptr(idx)*d.size)) = d.zeroValue + idx++ + } + cursor++ + return cursor, nil + case ',': + cursor++ + continue + default: + return 0, errors.ErrInvalidCharacter(buf[cursor], "array", cursor) + } + } + default: + return 0, errors.ErrUnexpectedEndOfJSON("array", cursor) + } + } +} + +func (d *arrayDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + return nil, 0, fmt.Errorf("json: array decoder does not support decode path") +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/assign.go b/vendor/github.com/goccy/go-json/internal/decoder/assign.go new file mode 100644 index 0000000000000..c53e6ad9fc57b --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/assign.go @@ -0,0 +1,438 @@ +package decoder + +import ( + "fmt" + "reflect" + "strconv" +) + +var 
( + nilValue = reflect.ValueOf(nil) +) + +func AssignValue(src, dst reflect.Value) error { + if dst.Type().Kind() != reflect.Ptr { + return fmt.Errorf("invalid dst type. required pointer type: %T", dst.Type()) + } + casted, err := castValue(dst.Elem().Type(), src) + if err != nil { + return err + } + dst.Elem().Set(casted) + return nil +} + +func castValue(t reflect.Type, v reflect.Value) (reflect.Value, error) { + switch t.Kind() { + case reflect.Int: + vv, err := castInt(v) + if err != nil { + return nilValue, err + } + return reflect.ValueOf(int(vv.Int())), nil + case reflect.Int8: + vv, err := castInt(v) + if err != nil { + return nilValue, err + } + return reflect.ValueOf(int8(vv.Int())), nil + case reflect.Int16: + vv, err := castInt(v) + if err != nil { + return nilValue, err + } + return reflect.ValueOf(int16(vv.Int())), nil + case reflect.Int32: + vv, err := castInt(v) + if err != nil { + return nilValue, err + } + return reflect.ValueOf(int32(vv.Int())), nil + case reflect.Int64: + return castInt(v) + case reflect.Uint: + vv, err := castUint(v) + if err != nil { + return nilValue, err + } + return reflect.ValueOf(uint(vv.Uint())), nil + case reflect.Uint8: + vv, err := castUint(v) + if err != nil { + return nilValue, err + } + return reflect.ValueOf(uint8(vv.Uint())), nil + case reflect.Uint16: + vv, err := castUint(v) + if err != nil { + return nilValue, err + } + return reflect.ValueOf(uint16(vv.Uint())), nil + case reflect.Uint32: + vv, err := castUint(v) + if err != nil { + return nilValue, err + } + return reflect.ValueOf(uint32(vv.Uint())), nil + case reflect.Uint64: + return castUint(v) + case reflect.Uintptr: + vv, err := castUint(v) + if err != nil { + return nilValue, err + } + return reflect.ValueOf(uintptr(vv.Uint())), nil + case reflect.String: + return castString(v) + case reflect.Bool: + return castBool(v) + case reflect.Float32: + vv, err := castFloat(v) + if err != nil { + return nilValue, err + } + return reflect.ValueOf(float32(vv.Float())), nil + case reflect.Float64: + return castFloat(v) + case reflect.Array: + return castArray(t, v) + case reflect.Slice: + return castSlice(t, v) + case reflect.Map: + return castMap(t, v) + case reflect.Struct: + return castStruct(t, v) + } + return v, nil +} + +func castInt(v reflect.Value) (reflect.Value, error) { + switch v.Type().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v, nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return reflect.ValueOf(int64(v.Uint())), nil + case reflect.String: + i64, err := strconv.ParseInt(v.String(), 10, 64) + if err != nil { + return nilValue, err + } + return reflect.ValueOf(i64), nil + case reflect.Bool: + if v.Bool() { + return reflect.ValueOf(int64(1)), nil + } + return reflect.ValueOf(int64(0)), nil + case reflect.Float32, reflect.Float64: + return reflect.ValueOf(int64(v.Float())), nil + case reflect.Array: + if v.Len() > 0 { + return castInt(v.Index(0)) + } + return nilValue, fmt.Errorf("failed to cast to int64 from empty array") + case reflect.Slice: + if v.Len() > 0 { + return castInt(v.Index(0)) + } + return nilValue, fmt.Errorf("failed to cast to int64 from empty slice") + case reflect.Interface: + return castInt(reflect.ValueOf(v.Interface())) + case reflect.Map: + return nilValue, fmt.Errorf("failed to cast to int64 from map") + case reflect.Struct: + return nilValue, fmt.Errorf("failed to cast to int64 from struct") + case reflect.Ptr: + return castInt(v.Elem()) + 
} + return nilValue, fmt.Errorf("failed to cast to int64 from %s", v.Type().Kind()) +} + +func castUint(v reflect.Value) (reflect.Value, error) { + switch v.Type().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return reflect.ValueOf(uint64(v.Int())), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v, nil + case reflect.String: + u64, err := strconv.ParseUint(v.String(), 10, 64) + if err != nil { + return nilValue, err + } + return reflect.ValueOf(u64), nil + case reflect.Bool: + if v.Bool() { + return reflect.ValueOf(uint64(1)), nil + } + return reflect.ValueOf(uint64(0)), nil + case reflect.Float32, reflect.Float64: + return reflect.ValueOf(uint64(v.Float())), nil + case reflect.Array: + if v.Len() > 0 { + return castUint(v.Index(0)) + } + return nilValue, fmt.Errorf("failed to cast to uint64 from empty array") + case reflect.Slice: + if v.Len() > 0 { + return castUint(v.Index(0)) + } + return nilValue, fmt.Errorf("failed to cast to uint64 from empty slice") + case reflect.Interface: + return castUint(reflect.ValueOf(v.Interface())) + case reflect.Map: + return nilValue, fmt.Errorf("failed to cast to uint64 from map") + case reflect.Struct: + return nilValue, fmt.Errorf("failed to cast to uint64 from struct") + case reflect.Ptr: + return castUint(v.Elem()) + } + return nilValue, fmt.Errorf("failed to cast to uint64 from %s", v.Type().Kind()) +} + +func castString(v reflect.Value) (reflect.Value, error) { + switch v.Type().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return reflect.ValueOf(fmt.Sprint(v.Int())), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return reflect.ValueOf(fmt.Sprint(v.Uint())), nil + case reflect.String: + return v, nil + case reflect.Bool: + if v.Bool() { + return reflect.ValueOf("true"), nil + } + return reflect.ValueOf("false"), nil + case reflect.Float32, reflect.Float64: + return reflect.ValueOf(fmt.Sprint(v.Float())), nil + case reflect.Array: + if v.Len() > 0 { + return castString(v.Index(0)) + } + return nilValue, fmt.Errorf("failed to cast to string from empty array") + case reflect.Slice: + if v.Len() > 0 { + return castString(v.Index(0)) + } + return nilValue, fmt.Errorf("failed to cast to string from empty slice") + case reflect.Interface: + return castString(reflect.ValueOf(v.Interface())) + case reflect.Map: + return nilValue, fmt.Errorf("failed to cast to string from map") + case reflect.Struct: + return nilValue, fmt.Errorf("failed to cast to string from struct") + case reflect.Ptr: + return castString(v.Elem()) + } + return nilValue, fmt.Errorf("failed to cast to string from %s", v.Type().Kind()) +} + +func castBool(v reflect.Value) (reflect.Value, error) { + switch v.Type().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + switch v.Int() { + case 0: + return reflect.ValueOf(false), nil + case 1: + return reflect.ValueOf(true), nil + } + return nilValue, fmt.Errorf("failed to cast to bool from %d", v.Int()) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + switch v.Uint() { + case 0: + return reflect.ValueOf(false), nil + case 1: + return reflect.ValueOf(true), nil + } + return nilValue, fmt.Errorf("failed to cast to bool from %d", v.Uint()) + case reflect.String: + b, err := strconv.ParseBool(v.String()) + if err != nil { + return nilValue, err 
+ } + return reflect.ValueOf(b), nil + case reflect.Bool: + return v, nil + case reflect.Float32, reflect.Float64: + switch v.Float() { + case 0: + return reflect.ValueOf(false), nil + case 1: + return reflect.ValueOf(true), nil + } + return nilValue, fmt.Errorf("failed to cast to bool from %f", v.Float()) + case reflect.Array: + if v.Len() > 0 { + return castBool(v.Index(0)) + } + return nilValue, fmt.Errorf("failed to cast to string from empty array") + case reflect.Slice: + if v.Len() > 0 { + return castBool(v.Index(0)) + } + return nilValue, fmt.Errorf("failed to cast to string from empty slice") + case reflect.Interface: + return castBool(reflect.ValueOf(v.Interface())) + case reflect.Map: + return nilValue, fmt.Errorf("failed to cast to string from map") + case reflect.Struct: + return nilValue, fmt.Errorf("failed to cast to string from struct") + case reflect.Ptr: + return castBool(v.Elem()) + } + return nilValue, fmt.Errorf("failed to cast to bool from %s", v.Type().Kind()) +} + +func castFloat(v reflect.Value) (reflect.Value, error) { + switch v.Type().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return reflect.ValueOf(float64(v.Int())), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return reflect.ValueOf(float64(v.Uint())), nil + case reflect.String: + f64, err := strconv.ParseFloat(v.String(), 64) + if err != nil { + return nilValue, err + } + return reflect.ValueOf(f64), nil + case reflect.Bool: + if v.Bool() { + return reflect.ValueOf(float64(1)), nil + } + return reflect.ValueOf(float64(0)), nil + case reflect.Float32, reflect.Float64: + return v, nil + case reflect.Array: + if v.Len() > 0 { + return castFloat(v.Index(0)) + } + return nilValue, fmt.Errorf("failed to cast to float64 from empty array") + case reflect.Slice: + if v.Len() > 0 { + return castFloat(v.Index(0)) + } + return nilValue, fmt.Errorf("failed to cast to float64 from empty slice") + case reflect.Interface: + return castFloat(reflect.ValueOf(v.Interface())) + case reflect.Map: + return nilValue, fmt.Errorf("failed to cast to float64 from map") + case reflect.Struct: + return nilValue, fmt.Errorf("failed to cast to float64 from struct") + case reflect.Ptr: + return castFloat(v.Elem()) + } + return nilValue, fmt.Errorf("failed to cast to float64 from %s", v.Type().Kind()) +} + +func castArray(t reflect.Type, v reflect.Value) (reflect.Value, error) { + kind := v.Type().Kind() + if kind == reflect.Interface { + return castArray(t, reflect.ValueOf(v.Interface())) + } + if kind != reflect.Slice && kind != reflect.Array { + return nilValue, fmt.Errorf("failed to cast to array from %s", kind) + } + if t.Elem() == v.Type().Elem() { + return v, nil + } + if t.Len() != v.Len() { + return nilValue, fmt.Errorf("failed to cast [%d]array from slice of %d length", t.Len(), v.Len()) + } + ret := reflect.New(t).Elem() + for i := 0; i < v.Len(); i++ { + vv, err := castValue(t.Elem(), v.Index(i)) + if err != nil { + return nilValue, err + } + ret.Index(i).Set(vv) + } + return ret, nil +} + +func castSlice(t reflect.Type, v reflect.Value) (reflect.Value, error) { + kind := v.Type().Kind() + if kind == reflect.Interface { + return castSlice(t, reflect.ValueOf(v.Interface())) + } + if kind != reflect.Slice && kind != reflect.Array { + return nilValue, fmt.Errorf("failed to cast to slice from %s", kind) + } + if t.Elem() == v.Type().Elem() { + return v, nil + } + ret := reflect.MakeSlice(t, v.Len(), v.Len()) + for i := 0; i < 
v.Len(); i++ { + vv, err := castValue(t.Elem(), v.Index(i)) + if err != nil { + return nilValue, err + } + ret.Index(i).Set(vv) + } + return ret, nil +} + +func castMap(t reflect.Type, v reflect.Value) (reflect.Value, error) { + ret := reflect.MakeMap(t) + switch v.Type().Kind() { + case reflect.Map: + iter := v.MapRange() + for iter.Next() { + key, err := castValue(t.Key(), iter.Key()) + if err != nil { + return nilValue, err + } + value, err := castValue(t.Elem(), iter.Value()) + if err != nil { + return nilValue, err + } + ret.SetMapIndex(key, value) + } + return ret, nil + case reflect.Interface: + return castMap(t, reflect.ValueOf(v.Interface())) + case reflect.Slice: + if v.Len() > 0 { + return castMap(t, v.Index(0)) + } + return nilValue, fmt.Errorf("failed to cast to map from empty slice") + } + return nilValue, fmt.Errorf("failed to cast to map from %s", v.Type().Kind()) +} + +func castStruct(t reflect.Type, v reflect.Value) (reflect.Value, error) { + ret := reflect.New(t).Elem() + switch v.Type().Kind() { + case reflect.Map: + iter := v.MapRange() + for iter.Next() { + key := iter.Key() + k, err := castString(key) + if err != nil { + return nilValue, err + } + fieldName := k.String() + field, ok := t.FieldByName(fieldName) + if ok { + value, err := castValue(field.Type, iter.Value()) + if err != nil { + return nilValue, err + } + ret.FieldByName(fieldName).Set(value) + } + } + return ret, nil + case reflect.Struct: + for i := 0; i < v.Type().NumField(); i++ { + name := v.Type().Field(i).Name + ret.FieldByName(name).Set(v.FieldByName(name)) + } + return ret, nil + case reflect.Interface: + return castStruct(t, reflect.ValueOf(v.Interface())) + case reflect.Slice: + if v.Len() > 0 { + return castStruct(t, v.Index(0)) + } + return nilValue, fmt.Errorf("failed to cast to struct from empty slice") + default: + return nilValue, fmt.Errorf("failed to cast to struct from %s", v.Type().Kind()) + } +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/bool.go b/vendor/github.com/goccy/go-json/internal/decoder/bool.go new file mode 100644 index 0000000000000..ba6cf5bc496f2 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/bool.go @@ -0,0 +1,83 @@ +package decoder + +import ( + "fmt" + "unsafe" + + "github.com/goccy/go-json/internal/errors" +) + +type boolDecoder struct { + structName string + fieldName string +} + +func newBoolDecoder(structName, fieldName string) *boolDecoder { + return &boolDecoder{structName: structName, fieldName: fieldName} +} + +func (d *boolDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + c := s.skipWhiteSpace() + for { + switch c { + case 't': + if err := trueBytes(s); err != nil { + return err + } + **(**bool)(unsafe.Pointer(&p)) = true + return nil + case 'f': + if err := falseBytes(s); err != nil { + return err + } + **(**bool)(unsafe.Pointer(&p)) = false + return nil + case 'n': + if err := nullBytes(s); err != nil { + return err + } + return nil + case nul: + if s.read() { + c = s.char() + continue + } + goto ERROR + } + break + } +ERROR: + return errors.ErrUnexpectedEndOfJSON("bool", s.totalOffset()) +} + +func (d *boolDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + buf := ctx.Buf + cursor = skipWhiteSpace(buf, cursor) + switch buf[cursor] { + case 't': + if err := validateTrue(buf, cursor); err != nil { + return 0, err + } + cursor += 4 + **(**bool)(unsafe.Pointer(&p)) = true + return cursor, nil + case 'f': + if err := validateFalse(buf, cursor); err != nil 
{ + return 0, err + } + cursor += 5 + **(**bool)(unsafe.Pointer(&p)) = false + return cursor, nil + case 'n': + if err := validateNull(buf, cursor); err != nil { + return 0, err + } + cursor += 4 + return cursor, nil + } + return 0, errors.ErrUnexpectedEndOfJSON("bool", cursor) +} + +func (d *boolDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + return nil, 0, fmt.Errorf("json: bool decoder does not support decode path") +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/bytes.go b/vendor/github.com/goccy/go-json/internal/decoder/bytes.go new file mode 100644 index 0000000000000..939bf4327411c --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/bytes.go @@ -0,0 +1,118 @@ +package decoder + +import ( + "encoding/base64" + "fmt" + "unsafe" + + "github.com/goccy/go-json/internal/errors" + "github.com/goccy/go-json/internal/runtime" +) + +type bytesDecoder struct { + typ *runtime.Type + sliceDecoder Decoder + stringDecoder *stringDecoder + structName string + fieldName string +} + +func byteUnmarshalerSliceDecoder(typ *runtime.Type, structName string, fieldName string) Decoder { + var unmarshalDecoder Decoder + switch { + case runtime.PtrTo(typ).Implements(unmarshalJSONType): + unmarshalDecoder = newUnmarshalJSONDecoder(runtime.PtrTo(typ), structName, fieldName) + case runtime.PtrTo(typ).Implements(unmarshalTextType): + unmarshalDecoder = newUnmarshalTextDecoder(runtime.PtrTo(typ), structName, fieldName) + default: + unmarshalDecoder, _ = compileUint8(typ, structName, fieldName) + } + return newSliceDecoder(unmarshalDecoder, typ, 1, structName, fieldName) +} + +func newBytesDecoder(typ *runtime.Type, structName string, fieldName string) *bytesDecoder { + return &bytesDecoder{ + typ: typ, + sliceDecoder: byteUnmarshalerSliceDecoder(typ, structName, fieldName), + stringDecoder: newStringDecoder(structName, fieldName), + structName: structName, + fieldName: fieldName, + } +} + +func (d *bytesDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + bytes, err := d.decodeStreamBinary(s, depth, p) + if err != nil { + return err + } + if bytes == nil { + s.reset() + return nil + } + decodedLen := base64.StdEncoding.DecodedLen(len(bytes)) + buf := make([]byte, decodedLen) + n, err := base64.StdEncoding.Decode(buf, bytes) + if err != nil { + return err + } + *(*[]byte)(p) = buf[:n] + s.reset() + return nil +} + +func (d *bytesDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + bytes, c, err := d.decodeBinary(ctx, cursor, depth, p) + if err != nil { + return 0, err + } + if bytes == nil { + return c, nil + } + cursor = c + decodedLen := base64.StdEncoding.DecodedLen(len(bytes)) + b := make([]byte, decodedLen) + n, err := base64.StdEncoding.Decode(b, bytes) + if err != nil { + return 0, err + } + *(*[]byte)(p) = b[:n] + return cursor, nil +} + +func (d *bytesDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + return nil, 0, fmt.Errorf("json: []byte decoder does not support decode path") +} + +func (d *bytesDecoder) decodeStreamBinary(s *Stream, depth int64, p unsafe.Pointer) ([]byte, error) { + c := s.skipWhiteSpace() + if c == '[' { + if d.sliceDecoder == nil { + return nil, &errors.UnmarshalTypeError{ + Type: runtime.RType2Type(d.typ), + Offset: s.totalOffset(), + } + } + err := d.sliceDecoder.DecodeStream(s, depth, p) + return nil, err + } + return d.stringDecoder.decodeStreamByte(s) +} + +func (d *bytesDecoder) decodeBinary(ctx 
*RuntimeContext, cursor, depth int64, p unsafe.Pointer) ([]byte, int64, error) { + buf := ctx.Buf + cursor = skipWhiteSpace(buf, cursor) + if buf[cursor] == '[' { + if d.sliceDecoder == nil { + return nil, 0, &errors.UnmarshalTypeError{ + Type: runtime.RType2Type(d.typ), + Offset: cursor, + } + } + c, err := d.sliceDecoder.Decode(ctx, cursor, depth, p) + if err != nil { + return nil, 0, err + } + return nil, c, nil + } + return d.stringDecoder.decodeByte(buf, cursor) +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/compile.go b/vendor/github.com/goccy/go-json/internal/decoder/compile.go new file mode 100644 index 0000000000000..fab6437647bb5 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/compile.go @@ -0,0 +1,487 @@ +package decoder + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" + "sync/atomic" + "unicode" + "unsafe" + + "github.com/goccy/go-json/internal/runtime" +) + +var ( + jsonNumberType = reflect.TypeOf(json.Number("")) + typeAddr *runtime.TypeAddr + cachedDecoderMap unsafe.Pointer // map[uintptr]decoder + cachedDecoder []Decoder +) + +func init() { + typeAddr = runtime.AnalyzeTypeAddr() + if typeAddr == nil { + typeAddr = &runtime.TypeAddr{} + } + cachedDecoder = make([]Decoder, typeAddr.AddrRange>>typeAddr.AddrShift+1) +} + +func loadDecoderMap() map[uintptr]Decoder { + p := atomic.LoadPointer(&cachedDecoderMap) + return *(*map[uintptr]Decoder)(unsafe.Pointer(&p)) +} + +func storeDecoder(typ uintptr, dec Decoder, m map[uintptr]Decoder) { + newDecoderMap := make(map[uintptr]Decoder, len(m)+1) + newDecoderMap[typ] = dec + + for k, v := range m { + newDecoderMap[k] = v + } + + atomic.StorePointer(&cachedDecoderMap, *(*unsafe.Pointer)(unsafe.Pointer(&newDecoderMap))) +} + +func compileToGetDecoderSlowPath(typeptr uintptr, typ *runtime.Type) (Decoder, error) { + decoderMap := loadDecoderMap() + if dec, exists := decoderMap[typeptr]; exists { + return dec, nil + } + + dec, err := compileHead(typ, map[uintptr]Decoder{}) + if err != nil { + return nil, err + } + storeDecoder(typeptr, dec, decoderMap) + return dec, nil +} + +func compileHead(typ *runtime.Type, structTypeToDecoder map[uintptr]Decoder) (Decoder, error) { + switch { + case implementsUnmarshalJSONType(runtime.PtrTo(typ)): + return newUnmarshalJSONDecoder(runtime.PtrTo(typ), "", ""), nil + case runtime.PtrTo(typ).Implements(unmarshalTextType): + return newUnmarshalTextDecoder(runtime.PtrTo(typ), "", ""), nil + } + return compile(typ.Elem(), "", "", structTypeToDecoder) +} + +func compile(typ *runtime.Type, structName, fieldName string, structTypeToDecoder map[uintptr]Decoder) (Decoder, error) { + switch { + case implementsUnmarshalJSONType(runtime.PtrTo(typ)): + return newUnmarshalJSONDecoder(runtime.PtrTo(typ), structName, fieldName), nil + case runtime.PtrTo(typ).Implements(unmarshalTextType): + return newUnmarshalTextDecoder(runtime.PtrTo(typ), structName, fieldName), nil + } + + switch typ.Kind() { + case reflect.Ptr: + return compilePtr(typ, structName, fieldName, structTypeToDecoder) + case reflect.Struct: + return compileStruct(typ, structName, fieldName, structTypeToDecoder) + case reflect.Slice: + elem := typ.Elem() + if elem.Kind() == reflect.Uint8 { + return compileBytes(elem, structName, fieldName) + } + return compileSlice(typ, structName, fieldName, structTypeToDecoder) + case reflect.Array: + return compileArray(typ, structName, fieldName, structTypeToDecoder) + case reflect.Map: + return compileMap(typ, structName, fieldName, structTypeToDecoder) + case 
reflect.Interface: + return compileInterface(typ, structName, fieldName) + case reflect.Uintptr: + return compileUint(typ, structName, fieldName) + case reflect.Int: + return compileInt(typ, structName, fieldName) + case reflect.Int8: + return compileInt8(typ, structName, fieldName) + case reflect.Int16: + return compileInt16(typ, structName, fieldName) + case reflect.Int32: + return compileInt32(typ, structName, fieldName) + case reflect.Int64: + return compileInt64(typ, structName, fieldName) + case reflect.Uint: + return compileUint(typ, structName, fieldName) + case reflect.Uint8: + return compileUint8(typ, structName, fieldName) + case reflect.Uint16: + return compileUint16(typ, structName, fieldName) + case reflect.Uint32: + return compileUint32(typ, structName, fieldName) + case reflect.Uint64: + return compileUint64(typ, structName, fieldName) + case reflect.String: + return compileString(typ, structName, fieldName) + case reflect.Bool: + return compileBool(structName, fieldName) + case reflect.Float32: + return compileFloat32(structName, fieldName) + case reflect.Float64: + return compileFloat64(structName, fieldName) + case reflect.Func: + return compileFunc(typ, structName, fieldName) + } + return newInvalidDecoder(typ, structName, fieldName), nil +} + +func isStringTagSupportedType(typ *runtime.Type) bool { + switch { + case implementsUnmarshalJSONType(runtime.PtrTo(typ)): + return false + case runtime.PtrTo(typ).Implements(unmarshalTextType): + return false + } + switch typ.Kind() { + case reflect.Map: + return false + case reflect.Slice: + return false + case reflect.Array: + return false + case reflect.Struct: + return false + case reflect.Interface: + return false + } + return true +} + +func compileMapKey(typ *runtime.Type, structName, fieldName string, structTypeToDecoder map[uintptr]Decoder) (Decoder, error) { + if runtime.PtrTo(typ).Implements(unmarshalTextType) { + return newUnmarshalTextDecoder(runtime.PtrTo(typ), structName, fieldName), nil + } + if typ.Kind() == reflect.String { + return newStringDecoder(structName, fieldName), nil + } + dec, err := compile(typ, structName, fieldName, structTypeToDecoder) + if err != nil { + return nil, err + } + for { + switch t := dec.(type) { + case *stringDecoder, *interfaceDecoder: + return dec, nil + case *boolDecoder, *intDecoder, *uintDecoder, *numberDecoder: + return newWrappedStringDecoder(typ, dec, structName, fieldName), nil + case *ptrDecoder: + dec = t.dec + default: + return newInvalidDecoder(typ, structName, fieldName), nil + } + } +} + +func compilePtr(typ *runtime.Type, structName, fieldName string, structTypeToDecoder map[uintptr]Decoder) (Decoder, error) { + dec, err := compile(typ.Elem(), structName, fieldName, structTypeToDecoder) + if err != nil { + return nil, err + } + return newPtrDecoder(dec, typ.Elem(), structName, fieldName), nil +} + +func compileInt(typ *runtime.Type, structName, fieldName string) (Decoder, error) { + return newIntDecoder(typ, structName, fieldName, func(p unsafe.Pointer, v int64) { + *(*int)(p) = int(v) + }), nil +} + +func compileInt8(typ *runtime.Type, structName, fieldName string) (Decoder, error) { + return newIntDecoder(typ, structName, fieldName, func(p unsafe.Pointer, v int64) { + *(*int8)(p) = int8(v) + }), nil +} + +func compileInt16(typ *runtime.Type, structName, fieldName string) (Decoder, error) { + return newIntDecoder(typ, structName, fieldName, func(p unsafe.Pointer, v int64) { + *(*int16)(p) = int16(v) + }), nil +} + +func compileInt32(typ *runtime.Type, structName, 
fieldName string) (Decoder, error) { + return newIntDecoder(typ, structName, fieldName, func(p unsafe.Pointer, v int64) { + *(*int32)(p) = int32(v) + }), nil +} + +func compileInt64(typ *runtime.Type, structName, fieldName string) (Decoder, error) { + return newIntDecoder(typ, structName, fieldName, func(p unsafe.Pointer, v int64) { + *(*int64)(p) = v + }), nil +} + +func compileUint(typ *runtime.Type, structName, fieldName string) (Decoder, error) { + return newUintDecoder(typ, structName, fieldName, func(p unsafe.Pointer, v uint64) { + *(*uint)(p) = uint(v) + }), nil +} + +func compileUint8(typ *runtime.Type, structName, fieldName string) (Decoder, error) { + return newUintDecoder(typ, structName, fieldName, func(p unsafe.Pointer, v uint64) { + *(*uint8)(p) = uint8(v) + }), nil +} + +func compileUint16(typ *runtime.Type, structName, fieldName string) (Decoder, error) { + return newUintDecoder(typ, structName, fieldName, func(p unsafe.Pointer, v uint64) { + *(*uint16)(p) = uint16(v) + }), nil +} + +func compileUint32(typ *runtime.Type, structName, fieldName string) (Decoder, error) { + return newUintDecoder(typ, structName, fieldName, func(p unsafe.Pointer, v uint64) { + *(*uint32)(p) = uint32(v) + }), nil +} + +func compileUint64(typ *runtime.Type, structName, fieldName string) (Decoder, error) { + return newUintDecoder(typ, structName, fieldName, func(p unsafe.Pointer, v uint64) { + *(*uint64)(p) = v + }), nil +} + +func compileFloat32(structName, fieldName string) (Decoder, error) { + return newFloatDecoder(structName, fieldName, func(p unsafe.Pointer, v float64) { + *(*float32)(p) = float32(v) + }), nil +} + +func compileFloat64(structName, fieldName string) (Decoder, error) { + return newFloatDecoder(structName, fieldName, func(p unsafe.Pointer, v float64) { + *(*float64)(p) = v + }), nil +} + +func compileString(typ *runtime.Type, structName, fieldName string) (Decoder, error) { + if typ == runtime.Type2RType(jsonNumberType) { + return newNumberDecoder(structName, fieldName, func(p unsafe.Pointer, v json.Number) { + *(*json.Number)(p) = v + }), nil + } + return newStringDecoder(structName, fieldName), nil +} + +func compileBool(structName, fieldName string) (Decoder, error) { + return newBoolDecoder(structName, fieldName), nil +} + +func compileBytes(typ *runtime.Type, structName, fieldName string) (Decoder, error) { + return newBytesDecoder(typ, structName, fieldName), nil +} + +func compileSlice(typ *runtime.Type, structName, fieldName string, structTypeToDecoder map[uintptr]Decoder) (Decoder, error) { + elem := typ.Elem() + decoder, err := compile(elem, structName, fieldName, structTypeToDecoder) + if err != nil { + return nil, err + } + return newSliceDecoder(decoder, elem, elem.Size(), structName, fieldName), nil +} + +func compileArray(typ *runtime.Type, structName, fieldName string, structTypeToDecoder map[uintptr]Decoder) (Decoder, error) { + elem := typ.Elem() + decoder, err := compile(elem, structName, fieldName, structTypeToDecoder) + if err != nil { + return nil, err + } + return newArrayDecoder(decoder, elem, typ.Len(), structName, fieldName), nil +} + +func compileMap(typ *runtime.Type, structName, fieldName string, structTypeToDecoder map[uintptr]Decoder) (Decoder, error) { + keyDec, err := compileMapKey(typ.Key(), structName, fieldName, structTypeToDecoder) + if err != nil { + return nil, err + } + valueDec, err := compile(typ.Elem(), structName, fieldName, structTypeToDecoder) + if err != nil { + return nil, err + } + return newMapDecoder(typ, typ.Key(), keyDec, 
typ.Elem(), valueDec, structName, fieldName), nil +} + +func compileInterface(typ *runtime.Type, structName, fieldName string) (Decoder, error) { + return newInterfaceDecoder(typ, structName, fieldName), nil +} + +func compileFunc(typ *runtime.Type, strutName, fieldName string) (Decoder, error) { + return newFuncDecoder(typ, strutName, fieldName), nil +} + +func typeToStructTags(typ *runtime.Type) runtime.StructTags { + tags := runtime.StructTags{} + fieldNum := typ.NumField() + for i := 0; i < fieldNum; i++ { + field := typ.Field(i) + if runtime.IsIgnoredStructField(field) { + continue + } + tags = append(tags, runtime.StructTagFromField(field)) + } + return tags +} + +func compileStruct(typ *runtime.Type, structName, fieldName string, structTypeToDecoder map[uintptr]Decoder) (Decoder, error) { + fieldNum := typ.NumField() + fieldMap := map[string]*structFieldSet{} + typeptr := uintptr(unsafe.Pointer(typ)) + if dec, exists := structTypeToDecoder[typeptr]; exists { + return dec, nil + } + structDec := newStructDecoder(structName, fieldName, fieldMap) + structTypeToDecoder[typeptr] = structDec + structName = typ.Name() + tags := typeToStructTags(typ) + allFields := []*structFieldSet{} + for i := 0; i < fieldNum; i++ { + field := typ.Field(i) + if runtime.IsIgnoredStructField(field) { + continue + } + isUnexportedField := unicode.IsLower([]rune(field.Name)[0]) + tag := runtime.StructTagFromField(field) + dec, err := compile(runtime.Type2RType(field.Type), structName, field.Name, structTypeToDecoder) + if err != nil { + return nil, err + } + if field.Anonymous && !tag.IsTaggedKey { + if stDec, ok := dec.(*structDecoder); ok { + if runtime.Type2RType(field.Type) == typ { + // recursive definition + continue + } + for k, v := range stDec.fieldMap { + if tags.ExistsKey(k) { + continue + } + fieldSet := &structFieldSet{ + dec: v.dec, + offset: field.Offset + v.offset, + isTaggedKey: v.isTaggedKey, + key: k, + keyLen: int64(len(k)), + } + allFields = append(allFields, fieldSet) + } + } else if pdec, ok := dec.(*ptrDecoder); ok { + contentDec := pdec.contentDecoder() + if pdec.typ == typ { + // recursive definition + continue + } + var fieldSetErr error + if isUnexportedField { + fieldSetErr = fmt.Errorf( + "json: cannot set embedded pointer to unexported struct: %v", + field.Type.Elem(), + ) + } + if dec, ok := contentDec.(*structDecoder); ok { + for k, v := range dec.fieldMap { + if tags.ExistsKey(k) { + continue + } + fieldSet := &structFieldSet{ + dec: newAnonymousFieldDecoder(pdec.typ, v.offset, v.dec), + offset: field.Offset, + isTaggedKey: v.isTaggedKey, + key: k, + keyLen: int64(len(k)), + err: fieldSetErr, + } + allFields = append(allFields, fieldSet) + } + } else { + fieldSet := &structFieldSet{ + dec: pdec, + offset: field.Offset, + isTaggedKey: tag.IsTaggedKey, + key: field.Name, + keyLen: int64(len(field.Name)), + } + allFields = append(allFields, fieldSet) + } + } else { + fieldSet := &structFieldSet{ + dec: dec, + offset: field.Offset, + isTaggedKey: tag.IsTaggedKey, + key: field.Name, + keyLen: int64(len(field.Name)), + } + allFields = append(allFields, fieldSet) + } + } else { + if tag.IsString && isStringTagSupportedType(runtime.Type2RType(field.Type)) { + dec = newWrappedStringDecoder(runtime.Type2RType(field.Type), dec, structName, field.Name) + } + var key string + if tag.Key != "" { + key = tag.Key + } else { + key = field.Name + } + fieldSet := &structFieldSet{ + dec: dec, + offset: field.Offset, + isTaggedKey: tag.IsTaggedKey, + key: key, + keyLen: int64(len(key)), + } + 
allFields = append(allFields, fieldSet) + } + } + for _, set := range filterDuplicatedFields(allFields) { + fieldMap[set.key] = set + lower := strings.ToLower(set.key) + if _, exists := fieldMap[lower]; !exists { + // first win + fieldMap[lower] = set + } + } + delete(structTypeToDecoder, typeptr) + structDec.tryOptimize() + return structDec, nil +} + +func filterDuplicatedFields(allFields []*structFieldSet) []*structFieldSet { + fieldMap := map[string][]*structFieldSet{} + for _, field := range allFields { + fieldMap[field.key] = append(fieldMap[field.key], field) + } + duplicatedFieldMap := map[string]struct{}{} + for k, sets := range fieldMap { + sets = filterFieldSets(sets) + if len(sets) != 1 { + duplicatedFieldMap[k] = struct{}{} + } + } + + filtered := make([]*structFieldSet, 0, len(allFields)) + for _, field := range allFields { + if _, exists := duplicatedFieldMap[field.key]; exists { + continue + } + filtered = append(filtered, field) + } + return filtered +} + +func filterFieldSets(sets []*structFieldSet) []*structFieldSet { + if len(sets) == 1 { + return sets + } + filtered := make([]*structFieldSet, 0, len(sets)) + for _, set := range sets { + if set.isTaggedKey { + filtered = append(filtered, set) + } + } + return filtered +} + +func implementsUnmarshalJSONType(typ *runtime.Type) bool { + return typ.Implements(unmarshalJSONType) || typ.Implements(unmarshalJSONContextType) +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/compile_norace.go b/vendor/github.com/goccy/go-json/internal/decoder/compile_norace.go new file mode 100644 index 0000000000000..eb7e2b1345d7f --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/compile_norace.go @@ -0,0 +1,29 @@ +//go:build !race +// +build !race + +package decoder + +import ( + "unsafe" + + "github.com/goccy/go-json/internal/runtime" +) + +func CompileToGetDecoder(typ *runtime.Type) (Decoder, error) { + typeptr := uintptr(unsafe.Pointer(typ)) + if typeptr > typeAddr.MaxTypeAddr { + return compileToGetDecoderSlowPath(typeptr, typ) + } + + index := (typeptr - typeAddr.BaseTypeAddr) >> typeAddr.AddrShift + if dec := cachedDecoder[index]; dec != nil { + return dec, nil + } + + dec, err := compileHead(typ, map[uintptr]Decoder{}) + if err != nil { + return nil, err + } + cachedDecoder[index] = dec + return dec, nil +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/compile_race.go b/vendor/github.com/goccy/go-json/internal/decoder/compile_race.go new file mode 100644 index 0000000000000..49cdda4a172f2 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/compile_race.go @@ -0,0 +1,37 @@ +//go:build race +// +build race + +package decoder + +import ( + "sync" + "unsafe" + + "github.com/goccy/go-json/internal/runtime" +) + +var decMu sync.RWMutex + +func CompileToGetDecoder(typ *runtime.Type) (Decoder, error) { + typeptr := uintptr(unsafe.Pointer(typ)) + if typeptr > typeAddr.MaxTypeAddr { + return compileToGetDecoderSlowPath(typeptr, typ) + } + + index := (typeptr - typeAddr.BaseTypeAddr) >> typeAddr.AddrShift + decMu.RLock() + if dec := cachedDecoder[index]; dec != nil { + decMu.RUnlock() + return dec, nil + } + decMu.RUnlock() + + dec, err := compileHead(typ, map[uintptr]Decoder{}) + if err != nil { + return nil, err + } + decMu.Lock() + cachedDecoder[index] = dec + decMu.Unlock() + return dec, nil +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/context.go b/vendor/github.com/goccy/go-json/internal/decoder/context.go new file mode 100644 index 
0000000000000..cb2ffdafd037f --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/context.go @@ -0,0 +1,254 @@ +package decoder + +import ( + "sync" + "unsafe" + + "github.com/goccy/go-json/internal/errors" +) + +type RuntimeContext struct { + Buf []byte + Option *Option +} + +var ( + runtimeContextPool = sync.Pool{ + New: func() interface{} { + return &RuntimeContext{ + Option: &Option{}, + } + }, + } +) + +func TakeRuntimeContext() *RuntimeContext { + return runtimeContextPool.Get().(*RuntimeContext) +} + +func ReleaseRuntimeContext(ctx *RuntimeContext) { + runtimeContextPool.Put(ctx) +} + +var ( + isWhiteSpace = [256]bool{} +) + +func init() { + isWhiteSpace[' '] = true + isWhiteSpace['\n'] = true + isWhiteSpace['\t'] = true + isWhiteSpace['\r'] = true +} + +func char(ptr unsafe.Pointer, offset int64) byte { + return *(*byte)(unsafe.Pointer(uintptr(ptr) + uintptr(offset))) +} + +func skipWhiteSpace(buf []byte, cursor int64) int64 { + for isWhiteSpace[buf[cursor]] { + cursor++ + } + return cursor +} + +func skipObject(buf []byte, cursor, depth int64) (int64, error) { + braceCount := 1 + for { + switch buf[cursor] { + case '{': + braceCount++ + depth++ + if depth > maxDecodeNestingDepth { + return 0, errors.ErrExceededMaxDepth(buf[cursor], cursor) + } + case '}': + depth-- + braceCount-- + if braceCount == 0 { + return cursor + 1, nil + } + case '[': + depth++ + if depth > maxDecodeNestingDepth { + return 0, errors.ErrExceededMaxDepth(buf[cursor], cursor) + } + case ']': + depth-- + case '"': + for { + cursor++ + switch buf[cursor] { + case '\\': + cursor++ + if buf[cursor] == nul { + return 0, errors.ErrUnexpectedEndOfJSON("string of object", cursor) + } + case '"': + goto SWITCH_OUT + case nul: + return 0, errors.ErrUnexpectedEndOfJSON("string of object", cursor) + } + } + case nul: + return 0, errors.ErrUnexpectedEndOfJSON("object of object", cursor) + } + SWITCH_OUT: + cursor++ + } +} + +func skipArray(buf []byte, cursor, depth int64) (int64, error) { + bracketCount := 1 + for { + switch buf[cursor] { + case '[': + bracketCount++ + depth++ + if depth > maxDecodeNestingDepth { + return 0, errors.ErrExceededMaxDepth(buf[cursor], cursor) + } + case ']': + bracketCount-- + depth-- + if bracketCount == 0 { + return cursor + 1, nil + } + case '{': + depth++ + if depth > maxDecodeNestingDepth { + return 0, errors.ErrExceededMaxDepth(buf[cursor], cursor) + } + case '}': + depth-- + case '"': + for { + cursor++ + switch buf[cursor] { + case '\\': + cursor++ + if buf[cursor] == nul { + return 0, errors.ErrUnexpectedEndOfJSON("string of object", cursor) + } + case '"': + goto SWITCH_OUT + case nul: + return 0, errors.ErrUnexpectedEndOfJSON("string of object", cursor) + } + } + case nul: + return 0, errors.ErrUnexpectedEndOfJSON("array of object", cursor) + } + SWITCH_OUT: + cursor++ + } +} + +func skipValue(buf []byte, cursor, depth int64) (int64, error) { + for { + switch buf[cursor] { + case ' ', '\t', '\n', '\r': + cursor++ + continue + case '{': + return skipObject(buf, cursor+1, depth+1) + case '[': + return skipArray(buf, cursor+1, depth+1) + case '"': + for { + cursor++ + switch buf[cursor] { + case '\\': + cursor++ + if buf[cursor] == nul { + return 0, errors.ErrUnexpectedEndOfJSON("string of object", cursor) + } + case '"': + return cursor + 1, nil + case nul: + return 0, errors.ErrUnexpectedEndOfJSON("string of object", cursor) + } + } + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + for { + cursor++ + if floatTable[buf[cursor]] { + continue + } + break + 
} + return cursor, nil + case 't': + if err := validateTrue(buf, cursor); err != nil { + return 0, err + } + cursor += 4 + return cursor, nil + case 'f': + if err := validateFalse(buf, cursor); err != nil { + return 0, err + } + cursor += 5 + return cursor, nil + case 'n': + if err := validateNull(buf, cursor); err != nil { + return 0, err + } + cursor += 4 + return cursor, nil + default: + return cursor, errors.ErrUnexpectedEndOfJSON("null", cursor) + } + } +} + +func validateTrue(buf []byte, cursor int64) error { + if cursor+3 >= int64(len(buf)) { + return errors.ErrUnexpectedEndOfJSON("true", cursor) + } + if buf[cursor+1] != 'r' { + return errors.ErrInvalidCharacter(buf[cursor+1], "true", cursor) + } + if buf[cursor+2] != 'u' { + return errors.ErrInvalidCharacter(buf[cursor+2], "true", cursor) + } + if buf[cursor+3] != 'e' { + return errors.ErrInvalidCharacter(buf[cursor+3], "true", cursor) + } + return nil +} + +func validateFalse(buf []byte, cursor int64) error { + if cursor+4 >= int64(len(buf)) { + return errors.ErrUnexpectedEndOfJSON("false", cursor) + } + if buf[cursor+1] != 'a' { + return errors.ErrInvalidCharacter(buf[cursor+1], "false", cursor) + } + if buf[cursor+2] != 'l' { + return errors.ErrInvalidCharacter(buf[cursor+2], "false", cursor) + } + if buf[cursor+3] != 's' { + return errors.ErrInvalidCharacter(buf[cursor+3], "false", cursor) + } + if buf[cursor+4] != 'e' { + return errors.ErrInvalidCharacter(buf[cursor+4], "false", cursor) + } + return nil +} + +func validateNull(buf []byte, cursor int64) error { + if cursor+3 >= int64(len(buf)) { + return errors.ErrUnexpectedEndOfJSON("null", cursor) + } + if buf[cursor+1] != 'u' { + return errors.ErrInvalidCharacter(buf[cursor+1], "null", cursor) + } + if buf[cursor+2] != 'l' { + return errors.ErrInvalidCharacter(buf[cursor+2], "null", cursor) + } + if buf[cursor+3] != 'l' { + return errors.ErrInvalidCharacter(buf[cursor+3], "null", cursor) + } + return nil +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/float.go b/vendor/github.com/goccy/go-json/internal/decoder/float.go new file mode 100644 index 0000000000000..9b2eb8b35a4db --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/float.go @@ -0,0 +1,170 @@ +package decoder + +import ( + "strconv" + "unsafe" + + "github.com/goccy/go-json/internal/errors" +) + +type floatDecoder struct { + op func(unsafe.Pointer, float64) + structName string + fieldName string +} + +func newFloatDecoder(structName, fieldName string, op func(unsafe.Pointer, float64)) *floatDecoder { + return &floatDecoder{op: op, structName: structName, fieldName: fieldName} +} + +var ( + floatTable = [256]bool{ + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + '.': true, + 'e': true, + 'E': true, + '+': true, + '-': true, + } + + validEndNumberChar = [256]bool{ + nul: true, + ' ': true, + '\t': true, + '\r': true, + '\n': true, + ',': true, + ':': true, + '}': true, + ']': true, + } +) + +func floatBytes(s *Stream) []byte { + start := s.cursor + for { + s.cursor++ + if floatTable[s.char()] { + continue + } else if s.char() == nul { + if s.read() { + s.cursor-- // for retry current character + continue + } + } + break + } + return s.buf[start:s.cursor] +} + +func (d *floatDecoder) decodeStreamByte(s *Stream) ([]byte, error) { + for { + switch s.char() { + case ' ', '\n', '\t', '\r': + s.cursor++ + continue + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return floatBytes(s), nil + case 
'n': + if err := nullBytes(s); err != nil { + return nil, err + } + return nil, nil + case nul: + if s.read() { + continue + } + goto ERROR + default: + goto ERROR + } + } +ERROR: + return nil, errors.ErrUnexpectedEndOfJSON("float", s.totalOffset()) +} + +func (d *floatDecoder) decodeByte(buf []byte, cursor int64) ([]byte, int64, error) { + for { + switch buf[cursor] { + case ' ', '\n', '\t', '\r': + cursor++ + continue + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + start := cursor + cursor++ + for floatTable[buf[cursor]] { + cursor++ + } + num := buf[start:cursor] + return num, cursor, nil + case 'n': + if err := validateNull(buf, cursor); err != nil { + return nil, 0, err + } + cursor += 4 + return nil, cursor, nil + default: + return nil, 0, errors.ErrUnexpectedEndOfJSON("float", cursor) + } + } +} + +func (d *floatDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + bytes, err := d.decodeStreamByte(s) + if err != nil { + return err + } + if bytes == nil { + return nil + } + str := *(*string)(unsafe.Pointer(&bytes)) + f64, err := strconv.ParseFloat(str, 64) + if err != nil { + return errors.ErrSyntax(err.Error(), s.totalOffset()) + } + d.op(p, f64) + return nil +} + +func (d *floatDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + buf := ctx.Buf + bytes, c, err := d.decodeByte(buf, cursor) + if err != nil { + return 0, err + } + if bytes == nil { + return c, nil + } + cursor = c + if !validEndNumberChar[buf[cursor]] { + return 0, errors.ErrUnexpectedEndOfJSON("float", cursor) + } + s := *(*string)(unsafe.Pointer(&bytes)) + f64, err := strconv.ParseFloat(s, 64) + if err != nil { + return 0, errors.ErrSyntax(err.Error(), cursor) + } + d.op(p, f64) + return cursor, nil +} + +func (d *floatDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + buf := ctx.Buf + bytes, c, err := d.decodeByte(buf, cursor) + if err != nil { + return nil, 0, err + } + if bytes == nil { + return [][]byte{nullbytes}, c, nil + } + return [][]byte{bytes}, c, nil +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/func.go b/vendor/github.com/goccy/go-json/internal/decoder/func.go new file mode 100644 index 0000000000000..4cc12ca81f142 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/func.go @@ -0,0 +1,146 @@ +package decoder + +import ( + "bytes" + "fmt" + "unsafe" + + "github.com/goccy/go-json/internal/errors" + "github.com/goccy/go-json/internal/runtime" +) + +type funcDecoder struct { + typ *runtime.Type + structName string + fieldName string +} + +func newFuncDecoder(typ *runtime.Type, structName, fieldName string) *funcDecoder { + fnDecoder := &funcDecoder{typ, structName, fieldName} + return fnDecoder +} + +func (d *funcDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + s.skipWhiteSpace() + start := s.cursor + if err := s.skipValue(depth); err != nil { + return err + } + src := s.buf[start:s.cursor] + if len(src) > 0 { + switch src[0] { + case '"': + return &errors.UnmarshalTypeError{ + Value: "string", + Type: runtime.RType2Type(d.typ), + Offset: s.totalOffset(), + } + case '[': + return &errors.UnmarshalTypeError{ + Value: "array", + Type: runtime.RType2Type(d.typ), + Offset: s.totalOffset(), + } + case '{': + return &errors.UnmarshalTypeError{ + Value: "object", + Type: runtime.RType2Type(d.typ), + Offset: s.totalOffset(), + } + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return &errors.UnmarshalTypeError{ + Value: 
"number", + Type: runtime.RType2Type(d.typ), + Offset: s.totalOffset(), + } + case 'n': + if err := nullBytes(s); err != nil { + return err + } + *(*unsafe.Pointer)(p) = nil + return nil + case 't': + if err := trueBytes(s); err == nil { + return &errors.UnmarshalTypeError{ + Value: "boolean", + Type: runtime.RType2Type(d.typ), + Offset: s.totalOffset(), + } + } + case 'f': + if err := falseBytes(s); err == nil { + return &errors.UnmarshalTypeError{ + Value: "boolean", + Type: runtime.RType2Type(d.typ), + Offset: s.totalOffset(), + } + } + } + } + return errors.ErrInvalidBeginningOfValue(s.buf[s.cursor], s.totalOffset()) +} + +func (d *funcDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + buf := ctx.Buf + cursor = skipWhiteSpace(buf, cursor) + start := cursor + end, err := skipValue(buf, cursor, depth) + if err != nil { + return 0, err + } + src := buf[start:end] + if len(src) > 0 { + switch src[0] { + case '"': + return 0, &errors.UnmarshalTypeError{ + Value: "string", + Type: runtime.RType2Type(d.typ), + Offset: start, + } + case '[': + return 0, &errors.UnmarshalTypeError{ + Value: "array", + Type: runtime.RType2Type(d.typ), + Offset: start, + } + case '{': + return 0, &errors.UnmarshalTypeError{ + Value: "object", + Type: runtime.RType2Type(d.typ), + Offset: start, + } + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return 0, &errors.UnmarshalTypeError{ + Value: "number", + Type: runtime.RType2Type(d.typ), + Offset: start, + } + case 'n': + if bytes.Equal(src, nullbytes) { + *(*unsafe.Pointer)(p) = nil + return end, nil + } + case 't': + if err := validateTrue(buf, start); err == nil { + return 0, &errors.UnmarshalTypeError{ + Value: "boolean", + Type: runtime.RType2Type(d.typ), + Offset: start, + } + } + case 'f': + if err := validateFalse(buf, start); err == nil { + return 0, &errors.UnmarshalTypeError{ + Value: "boolean", + Type: runtime.RType2Type(d.typ), + Offset: start, + } + } + } + } + return cursor, errors.ErrInvalidBeginningOfValue(buf[cursor], cursor) +} + +func (d *funcDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + return nil, 0, fmt.Errorf("json: func decoder does not support decode path") +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/int.go b/vendor/github.com/goccy/go-json/internal/decoder/int.go new file mode 100644 index 0000000000000..1a7f081994c05 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/int.go @@ -0,0 +1,246 @@ +package decoder + +import ( + "fmt" + "reflect" + "unsafe" + + "github.com/goccy/go-json/internal/errors" + "github.com/goccy/go-json/internal/runtime" +) + +type intDecoder struct { + typ *runtime.Type + kind reflect.Kind + op func(unsafe.Pointer, int64) + structName string + fieldName string +} + +func newIntDecoder(typ *runtime.Type, structName, fieldName string, op func(unsafe.Pointer, int64)) *intDecoder { + return &intDecoder{ + typ: typ, + kind: typ.Kind(), + op: op, + structName: structName, + fieldName: fieldName, + } +} + +func (d *intDecoder) typeError(buf []byte, offset int64) *errors.UnmarshalTypeError { + return &errors.UnmarshalTypeError{ + Value: fmt.Sprintf("number %s", string(buf)), + Type: runtime.RType2Type(d.typ), + Struct: d.structName, + Field: d.fieldName, + Offset: offset, + } +} + +var ( + pow10i64 = [...]int64{ + 1e00, 1e01, 1e02, 1e03, 1e04, 1e05, 1e06, 1e07, 1e08, 1e09, + 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, + } + pow10i64Len = len(pow10i64) +) + +func (d 
*intDecoder) parseInt(b []byte) (int64, error) { + isNegative := false + if b[0] == '-' { + b = b[1:] + isNegative = true + } + maxDigit := len(b) + if maxDigit > pow10i64Len { + return 0, fmt.Errorf("invalid length of number") + } + sum := int64(0) + for i := 0; i < maxDigit; i++ { + c := int64(b[i]) - 48 + digitValue := pow10i64[maxDigit-i-1] + sum += c * digitValue + } + if isNegative { + return -1 * sum, nil + } + return sum, nil +} + +var ( + numTable = [256]bool{ + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + } +) + +var ( + numZeroBuf = []byte{'0'} +) + +func (d *intDecoder) decodeStreamByte(s *Stream) ([]byte, error) { + for { + switch s.char() { + case ' ', '\n', '\t', '\r': + s.cursor++ + continue + case '-': + start := s.cursor + for { + s.cursor++ + if numTable[s.char()] { + continue + } else if s.char() == nul { + if s.read() { + s.cursor-- // for retry current character + continue + } + } + break + } + num := s.buf[start:s.cursor] + if len(num) < 2 { + goto ERROR + } + return num, nil + case '0': + s.cursor++ + return numZeroBuf, nil + case '1', '2', '3', '4', '5', '6', '7', '8', '9': + start := s.cursor + for { + s.cursor++ + if numTable[s.char()] { + continue + } else if s.char() == nul { + if s.read() { + s.cursor-- // for retry current character + continue + } + } + break + } + num := s.buf[start:s.cursor] + return num, nil + case 'n': + if err := nullBytes(s); err != nil { + return nil, err + } + return nil, nil + case nul: + if s.read() { + continue + } + goto ERROR + default: + return nil, d.typeError([]byte{s.char()}, s.totalOffset()) + } + } +ERROR: + return nil, errors.ErrUnexpectedEndOfJSON("number(integer)", s.totalOffset()) +} + +func (d *intDecoder) decodeByte(buf []byte, cursor int64) ([]byte, int64, error) { + b := (*sliceHeader)(unsafe.Pointer(&buf)).data + for { + switch char(b, cursor) { + case ' ', '\n', '\t', '\r': + cursor++ + continue + case '0': + cursor++ + return numZeroBuf, cursor, nil + case '-', '1', '2', '3', '4', '5', '6', '7', '8', '9': + start := cursor + cursor++ + for numTable[char(b, cursor)] { + cursor++ + } + num := buf[start:cursor] + return num, cursor, nil + case 'n': + if err := validateNull(buf, cursor); err != nil { + return nil, 0, err + } + cursor += 4 + return nil, cursor, nil + default: + return nil, 0, d.typeError([]byte{char(b, cursor)}, cursor) + } + } +} + +func (d *intDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + bytes, err := d.decodeStreamByte(s) + if err != nil { + return err + } + if bytes == nil { + return nil + } + i64, err := d.parseInt(bytes) + if err != nil { + return d.typeError(bytes, s.totalOffset()) + } + switch d.kind { + case reflect.Int8: + if i64 < -1*(1<<7) || (1<<7) <= i64 { + return d.typeError(bytes, s.totalOffset()) + } + case reflect.Int16: + if i64 < -1*(1<<15) || (1<<15) <= i64 { + return d.typeError(bytes, s.totalOffset()) + } + case reflect.Int32: + if i64 < -1*(1<<31) || (1<<31) <= i64 { + return d.typeError(bytes, s.totalOffset()) + } + } + d.op(p, i64) + s.reset() + return nil +} + +func (d *intDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + bytes, c, err := d.decodeByte(ctx.Buf, cursor) + if err != nil { + return 0, err + } + if bytes == nil { + return c, nil + } + cursor = c + + i64, err := d.parseInt(bytes) + if err != nil { + return 0, d.typeError(bytes, cursor) + } + switch d.kind { + case reflect.Int8: + if i64 < -1*(1<<7) || 
(1<<7) <= i64 { + return 0, d.typeError(bytes, cursor) + } + case reflect.Int16: + if i64 < -1*(1<<15) || (1<<15) <= i64 { + return 0, d.typeError(bytes, cursor) + } + case reflect.Int32: + if i64 < -1*(1<<31) || (1<<31) <= i64 { + return 0, d.typeError(bytes, cursor) + } + } + d.op(p, i64) + return cursor, nil +} + +func (d *intDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + return nil, 0, fmt.Errorf("json: int decoder does not support decode path") +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/interface.go b/vendor/github.com/goccy/go-json/internal/decoder/interface.go new file mode 100644 index 0000000000000..45c69ab8c7090 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/interface.go @@ -0,0 +1,528 @@ +package decoder + +import ( + "bytes" + "encoding" + "encoding/json" + "reflect" + "unsafe" + + "github.com/goccy/go-json/internal/errors" + "github.com/goccy/go-json/internal/runtime" +) + +type interfaceDecoder struct { + typ *runtime.Type + structName string + fieldName string + sliceDecoder *sliceDecoder + mapDecoder *mapDecoder + floatDecoder *floatDecoder + numberDecoder *numberDecoder + stringDecoder *stringDecoder +} + +func newEmptyInterfaceDecoder(structName, fieldName string) *interfaceDecoder { + ifaceDecoder := &interfaceDecoder{ + typ: emptyInterfaceType, + structName: structName, + fieldName: fieldName, + floatDecoder: newFloatDecoder(structName, fieldName, func(p unsafe.Pointer, v float64) { + *(*interface{})(p) = v + }), + numberDecoder: newNumberDecoder(structName, fieldName, func(p unsafe.Pointer, v json.Number) { + *(*interface{})(p) = v + }), + stringDecoder: newStringDecoder(structName, fieldName), + } + ifaceDecoder.sliceDecoder = newSliceDecoder( + ifaceDecoder, + emptyInterfaceType, + emptyInterfaceType.Size(), + structName, fieldName, + ) + ifaceDecoder.mapDecoder = newMapDecoder( + interfaceMapType, + stringType, + ifaceDecoder.stringDecoder, + interfaceMapType.Elem(), + ifaceDecoder, + structName, + fieldName, + ) + return ifaceDecoder +} + +func newInterfaceDecoder(typ *runtime.Type, structName, fieldName string) *interfaceDecoder { + emptyIfaceDecoder := newEmptyInterfaceDecoder(structName, fieldName) + stringDecoder := newStringDecoder(structName, fieldName) + return &interfaceDecoder{ + typ: typ, + structName: structName, + fieldName: fieldName, + sliceDecoder: newSliceDecoder( + emptyIfaceDecoder, + emptyInterfaceType, + emptyInterfaceType.Size(), + structName, fieldName, + ), + mapDecoder: newMapDecoder( + interfaceMapType, + stringType, + stringDecoder, + interfaceMapType.Elem(), + emptyIfaceDecoder, + structName, + fieldName, + ), + floatDecoder: newFloatDecoder(structName, fieldName, func(p unsafe.Pointer, v float64) { + *(*interface{})(p) = v + }), + numberDecoder: newNumberDecoder(structName, fieldName, func(p unsafe.Pointer, v json.Number) { + *(*interface{})(p) = v + }), + stringDecoder: stringDecoder, + } +} + +func (d *interfaceDecoder) numDecoder(s *Stream) Decoder { + if s.UseNumber { + return d.numberDecoder + } + return d.floatDecoder +} + +var ( + emptyInterfaceType = runtime.Type2RType(reflect.TypeOf((*interface{})(nil)).Elem()) + EmptyInterfaceType = emptyInterfaceType + interfaceMapType = runtime.Type2RType( + reflect.TypeOf((*map[string]interface{})(nil)).Elem(), + ) + stringType = runtime.Type2RType( + reflect.TypeOf(""), + ) +) + +func decodeStreamUnmarshaler(s *Stream, depth int64, unmarshaler json.Unmarshaler) error { + start := s.cursor + if err := 
s.skipValue(depth); err != nil { + return err + } + src := s.buf[start:s.cursor] + dst := make([]byte, len(src)) + copy(dst, src) + + if err := unmarshaler.UnmarshalJSON(dst); err != nil { + return err + } + return nil +} + +func decodeStreamUnmarshalerContext(s *Stream, depth int64, unmarshaler unmarshalerContext) error { + start := s.cursor + if err := s.skipValue(depth); err != nil { + return err + } + src := s.buf[start:s.cursor] + dst := make([]byte, len(src)) + copy(dst, src) + + if err := unmarshaler.UnmarshalJSON(s.Option.Context, dst); err != nil { + return err + } + return nil +} + +func decodeUnmarshaler(buf []byte, cursor, depth int64, unmarshaler json.Unmarshaler) (int64, error) { + cursor = skipWhiteSpace(buf, cursor) + start := cursor + end, err := skipValue(buf, cursor, depth) + if err != nil { + return 0, err + } + src := buf[start:end] + dst := make([]byte, len(src)) + copy(dst, src) + + if err := unmarshaler.UnmarshalJSON(dst); err != nil { + return 0, err + } + return end, nil +} + +func decodeUnmarshalerContext(ctx *RuntimeContext, buf []byte, cursor, depth int64, unmarshaler unmarshalerContext) (int64, error) { + cursor = skipWhiteSpace(buf, cursor) + start := cursor + end, err := skipValue(buf, cursor, depth) + if err != nil { + return 0, err + } + src := buf[start:end] + dst := make([]byte, len(src)) + copy(dst, src) + + if err := unmarshaler.UnmarshalJSON(ctx.Option.Context, dst); err != nil { + return 0, err + } + return end, nil +} + +func decodeStreamTextUnmarshaler(s *Stream, depth int64, unmarshaler encoding.TextUnmarshaler, p unsafe.Pointer) error { + start := s.cursor + if err := s.skipValue(depth); err != nil { + return err + } + src := s.buf[start:s.cursor] + if bytes.Equal(src, nullbytes) { + *(*unsafe.Pointer)(p) = nil + return nil + } + + dst := make([]byte, len(src)) + copy(dst, src) + + if err := unmarshaler.UnmarshalText(dst); err != nil { + return err + } + return nil +} + +func decodeTextUnmarshaler(buf []byte, cursor, depth int64, unmarshaler encoding.TextUnmarshaler, p unsafe.Pointer) (int64, error) { + cursor = skipWhiteSpace(buf, cursor) + start := cursor + end, err := skipValue(buf, cursor, depth) + if err != nil { + return 0, err + } + src := buf[start:end] + if bytes.Equal(src, nullbytes) { + *(*unsafe.Pointer)(p) = nil + return end, nil + } + if s, ok := unquoteBytes(src); ok { + src = s + } + if err := unmarshaler.UnmarshalText(src); err != nil { + return 0, err + } + return end, nil +} + +func (d *interfaceDecoder) decodeStreamEmptyInterface(s *Stream, depth int64, p unsafe.Pointer) error { + c := s.skipWhiteSpace() + for { + switch c { + case '{': + var v map[string]interface{} + ptr := unsafe.Pointer(&v) + if err := d.mapDecoder.DecodeStream(s, depth, ptr); err != nil { + return err + } + *(*interface{})(p) = v + return nil + case '[': + var v []interface{} + ptr := unsafe.Pointer(&v) + if err := d.sliceDecoder.DecodeStream(s, depth, ptr); err != nil { + return err + } + *(*interface{})(p) = v + return nil + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return d.numDecoder(s).DecodeStream(s, depth, p) + case '"': + s.cursor++ + start := s.cursor + for { + switch s.char() { + case '\\': + if _, err := decodeEscapeString(s, nil); err != nil { + return err + } + case '"': + literal := s.buf[start:s.cursor] + s.cursor++ + *(*interface{})(p) = string(literal) + return nil + case nul: + if s.read() { + continue + } + return errors.ErrUnexpectedEndOfJSON("string", s.totalOffset()) + } + s.cursor++ + } + case 't': + if err := 
trueBytes(s); err != nil { + return err + } + **(**interface{})(unsafe.Pointer(&p)) = true + return nil + case 'f': + if err := falseBytes(s); err != nil { + return err + } + **(**interface{})(unsafe.Pointer(&p)) = false + return nil + case 'n': + if err := nullBytes(s); err != nil { + return err + } + *(*interface{})(p) = nil + return nil + case nul: + if s.read() { + c = s.char() + continue + } + } + break + } + return errors.ErrInvalidBeginningOfValue(c, s.totalOffset()) +} + +type emptyInterface struct { + typ *runtime.Type + ptr unsafe.Pointer +} + +func (d *interfaceDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + runtimeInterfaceValue := *(*interface{})(unsafe.Pointer(&emptyInterface{ + typ: d.typ, + ptr: p, + })) + rv := reflect.ValueOf(runtimeInterfaceValue) + if rv.NumMethod() > 0 && rv.CanInterface() { + if u, ok := rv.Interface().(unmarshalerContext); ok { + return decodeStreamUnmarshalerContext(s, depth, u) + } + if u, ok := rv.Interface().(json.Unmarshaler); ok { + return decodeStreamUnmarshaler(s, depth, u) + } + if u, ok := rv.Interface().(encoding.TextUnmarshaler); ok { + return decodeStreamTextUnmarshaler(s, depth, u, p) + } + if s.skipWhiteSpace() == 'n' { + if err := nullBytes(s); err != nil { + return err + } + *(*interface{})(p) = nil + return nil + } + return d.errUnmarshalType(rv.Type(), s.totalOffset()) + } + iface := rv.Interface() + ifaceHeader := (*emptyInterface)(unsafe.Pointer(&iface)) + typ := ifaceHeader.typ + if ifaceHeader.ptr == nil || d.typ == typ || typ == nil { + // concrete type is empty interface + return d.decodeStreamEmptyInterface(s, depth, p) + } + if typ.Kind() == reflect.Ptr && typ.Elem() == d.typ || typ.Kind() != reflect.Ptr { + return d.decodeStreamEmptyInterface(s, depth, p) + } + if s.skipWhiteSpace() == 'n' { + if err := nullBytes(s); err != nil { + return err + } + *(*interface{})(p) = nil + return nil + } + decoder, err := CompileToGetDecoder(typ) + if err != nil { + return err + } + return decoder.DecodeStream(s, depth, ifaceHeader.ptr) +} + +func (d *interfaceDecoder) errUnmarshalType(typ reflect.Type, offset int64) *errors.UnmarshalTypeError { + return &errors.UnmarshalTypeError{ + Value: typ.String(), + Type: typ, + Offset: offset, + Struct: d.structName, + Field: d.fieldName, + } +} + +func (d *interfaceDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + buf := ctx.Buf + runtimeInterfaceValue := *(*interface{})(unsafe.Pointer(&emptyInterface{ + typ: d.typ, + ptr: p, + })) + rv := reflect.ValueOf(runtimeInterfaceValue) + if rv.NumMethod() > 0 && rv.CanInterface() { + if u, ok := rv.Interface().(unmarshalerContext); ok { + return decodeUnmarshalerContext(ctx, buf, cursor, depth, u) + } + if u, ok := rv.Interface().(json.Unmarshaler); ok { + return decodeUnmarshaler(buf, cursor, depth, u) + } + if u, ok := rv.Interface().(encoding.TextUnmarshaler); ok { + return decodeTextUnmarshaler(buf, cursor, depth, u, p) + } + cursor = skipWhiteSpace(buf, cursor) + if buf[cursor] == 'n' { + if err := validateNull(buf, cursor); err != nil { + return 0, err + } + cursor += 4 + **(**interface{})(unsafe.Pointer(&p)) = nil + return cursor, nil + } + return 0, d.errUnmarshalType(rv.Type(), cursor) + } + + iface := rv.Interface() + ifaceHeader := (*emptyInterface)(unsafe.Pointer(&iface)) + typ := ifaceHeader.typ + if ifaceHeader.ptr == nil || d.typ == typ || typ == nil { + // concrete type is empty interface + return d.decodeEmptyInterface(ctx, cursor, depth, p) + } + if typ.Kind() == 
reflect.Ptr && typ.Elem() == d.typ || typ.Kind() != reflect.Ptr { + return d.decodeEmptyInterface(ctx, cursor, depth, p) + } + cursor = skipWhiteSpace(buf, cursor) + if buf[cursor] == 'n' { + if err := validateNull(buf, cursor); err != nil { + return 0, err + } + cursor += 4 + **(**interface{})(unsafe.Pointer(&p)) = nil + return cursor, nil + } + decoder, err := CompileToGetDecoder(typ) + if err != nil { + return 0, err + } + return decoder.Decode(ctx, cursor, depth, ifaceHeader.ptr) +} + +func (d *interfaceDecoder) decodeEmptyInterface(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + buf := ctx.Buf + cursor = skipWhiteSpace(buf, cursor) + switch buf[cursor] { + case '{': + var v map[string]interface{} + ptr := unsafe.Pointer(&v) + cursor, err := d.mapDecoder.Decode(ctx, cursor, depth, ptr) + if err != nil { + return 0, err + } + **(**interface{})(unsafe.Pointer(&p)) = v + return cursor, nil + case '[': + var v []interface{} + ptr := unsafe.Pointer(&v) + cursor, err := d.sliceDecoder.Decode(ctx, cursor, depth, ptr) + if err != nil { + return 0, err + } + **(**interface{})(unsafe.Pointer(&p)) = v + return cursor, nil + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return d.floatDecoder.Decode(ctx, cursor, depth, p) + case '"': + var v string + ptr := unsafe.Pointer(&v) + cursor, err := d.stringDecoder.Decode(ctx, cursor, depth, ptr) + if err != nil { + return 0, err + } + **(**interface{})(unsafe.Pointer(&p)) = v + return cursor, nil + case 't': + if err := validateTrue(buf, cursor); err != nil { + return 0, err + } + cursor += 4 + **(**interface{})(unsafe.Pointer(&p)) = true + return cursor, nil + case 'f': + if err := validateFalse(buf, cursor); err != nil { + return 0, err + } + cursor += 5 + **(**interface{})(unsafe.Pointer(&p)) = false + return cursor, nil + case 'n': + if err := validateNull(buf, cursor); err != nil { + return 0, err + } + cursor += 4 + **(**interface{})(unsafe.Pointer(&p)) = nil + return cursor, nil + } + return cursor, errors.ErrInvalidBeginningOfValue(buf[cursor], cursor) +} + +func NewPathDecoder() Decoder { + ifaceDecoder := &interfaceDecoder{ + typ: emptyInterfaceType, + structName: "", + fieldName: "", + floatDecoder: newFloatDecoder("", "", func(p unsafe.Pointer, v float64) { + *(*interface{})(p) = v + }), + numberDecoder: newNumberDecoder("", "", func(p unsafe.Pointer, v json.Number) { + *(*interface{})(p) = v + }), + stringDecoder: newStringDecoder("", ""), + } + ifaceDecoder.sliceDecoder = newSliceDecoder( + ifaceDecoder, + emptyInterfaceType, + emptyInterfaceType.Size(), + "", "", + ) + ifaceDecoder.mapDecoder = newMapDecoder( + interfaceMapType, + stringType, + ifaceDecoder.stringDecoder, + interfaceMapType.Elem(), + ifaceDecoder, + "", "", + ) + return ifaceDecoder +} + +var ( + truebytes = []byte("true") + falsebytes = []byte("false") +) + +func (d *interfaceDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + buf := ctx.Buf + cursor = skipWhiteSpace(buf, cursor) + switch buf[cursor] { + case '{': + return d.mapDecoder.DecodePath(ctx, cursor, depth) + case '[': + return d.sliceDecoder.DecodePath(ctx, cursor, depth) + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return d.floatDecoder.DecodePath(ctx, cursor, depth) + case '"': + return d.stringDecoder.DecodePath(ctx, cursor, depth) + case 't': + if err := validateTrue(buf, cursor); err != nil { + return nil, 0, err + } + cursor += 4 + return [][]byte{truebytes}, cursor, nil + case 'f': + if err := 
validateFalse(buf, cursor); err != nil { + return nil, 0, err + } + cursor += 5 + return [][]byte{falsebytes}, cursor, nil + case 'n': + if err := validateNull(buf, cursor); err != nil { + return nil, 0, err + } + cursor += 4 + return [][]byte{nullbytes}, cursor, nil + } + return nil, cursor, errors.ErrInvalidBeginningOfValue(buf[cursor], cursor) +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/invalid.go b/vendor/github.com/goccy/go-json/internal/decoder/invalid.go new file mode 100644 index 0000000000000..4c9721b098928 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/invalid.go @@ -0,0 +1,55 @@ +package decoder + +import ( + "reflect" + "unsafe" + + "github.com/goccy/go-json/internal/errors" + "github.com/goccy/go-json/internal/runtime" +) + +type invalidDecoder struct { + typ *runtime.Type + kind reflect.Kind + structName string + fieldName string +} + +func newInvalidDecoder(typ *runtime.Type, structName, fieldName string) *invalidDecoder { + return &invalidDecoder{ + typ: typ, + kind: typ.Kind(), + structName: structName, + fieldName: fieldName, + } +} + +func (d *invalidDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + return &errors.UnmarshalTypeError{ + Value: "object", + Type: runtime.RType2Type(d.typ), + Offset: s.totalOffset(), + Struct: d.structName, + Field: d.fieldName, + } +} + +func (d *invalidDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + return 0, &errors.UnmarshalTypeError{ + Value: "object", + Type: runtime.RType2Type(d.typ), + Offset: cursor, + Struct: d.structName, + Field: d.fieldName, + } +} + +func (d *invalidDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + return nil, 0, &errors.UnmarshalTypeError{ + Value: "object", + Type: runtime.RType2Type(d.typ), + Offset: cursor, + Struct: d.structName, + Field: d.fieldName, + } +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/map.go b/vendor/github.com/goccy/go-json/internal/decoder/map.go new file mode 100644 index 0000000000000..07a9caea6513b --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/map.go @@ -0,0 +1,280 @@ +package decoder + +import ( + "reflect" + "unsafe" + + "github.com/goccy/go-json/internal/errors" + "github.com/goccy/go-json/internal/runtime" +) + +type mapDecoder struct { + mapType *runtime.Type + keyType *runtime.Type + valueType *runtime.Type + canUseAssignFaststrType bool + keyDecoder Decoder + valueDecoder Decoder + structName string + fieldName string +} + +func newMapDecoder(mapType *runtime.Type, keyType *runtime.Type, keyDec Decoder, valueType *runtime.Type, valueDec Decoder, structName, fieldName string) *mapDecoder { + return &mapDecoder{ + mapType: mapType, + keyDecoder: keyDec, + keyType: keyType, + canUseAssignFaststrType: canUseAssignFaststrType(keyType, valueType), + valueType: valueType, + valueDecoder: valueDec, + structName: structName, + fieldName: fieldName, + } +} + +const ( + mapMaxElemSize = 128 +) + +// See detail: https://github.com/goccy/go-json/pull/283 +func canUseAssignFaststrType(key *runtime.Type, value *runtime.Type) bool { + indirectElem := value.Size() > mapMaxElemSize + if indirectElem { + return false + } + return key.Kind() == reflect.String +} + +//go:linkname makemap reflect.makemap +func makemap(*runtime.Type, int) unsafe.Pointer + +//nolint:golint +//go:linkname mapassign_faststr runtime.mapassign_faststr +//go:noescape +func mapassign_faststr(t *runtime.Type, m unsafe.Pointer, s 
string) unsafe.Pointer + +//go:linkname mapassign reflect.mapassign +//go:noescape +func mapassign(t *runtime.Type, m unsafe.Pointer, k, v unsafe.Pointer) + +func (d *mapDecoder) mapassign(t *runtime.Type, m, k, v unsafe.Pointer) { + if d.canUseAssignFaststrType { + mapV := mapassign_faststr(t, m, *(*string)(k)) + typedmemmove(d.valueType, mapV, v) + } else { + mapassign(t, m, k, v) + } +} + +func (d *mapDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + depth++ + if depth > maxDecodeNestingDepth { + return errors.ErrExceededMaxDepth(s.char(), s.cursor) + } + + switch s.skipWhiteSpace() { + case 'n': + if err := nullBytes(s); err != nil { + return err + } + **(**unsafe.Pointer)(unsafe.Pointer(&p)) = nil + return nil + case '{': + default: + return errors.ErrExpected("{ character for map value", s.totalOffset()) + } + mapValue := *(*unsafe.Pointer)(p) + if mapValue == nil { + mapValue = makemap(d.mapType, 0) + } + s.cursor++ + if s.skipWhiteSpace() == '}' { + *(*unsafe.Pointer)(p) = mapValue + s.cursor++ + return nil + } + for { + k := unsafe_New(d.keyType) + if err := d.keyDecoder.DecodeStream(s, depth, k); err != nil { + return err + } + s.skipWhiteSpace() + if !s.equalChar(':') { + return errors.ErrExpected("colon after object key", s.totalOffset()) + } + s.cursor++ + v := unsafe_New(d.valueType) + if err := d.valueDecoder.DecodeStream(s, depth, v); err != nil { + return err + } + d.mapassign(d.mapType, mapValue, k, v) + s.skipWhiteSpace() + if s.equalChar('}') { + **(**unsafe.Pointer)(unsafe.Pointer(&p)) = mapValue + s.cursor++ + return nil + } + if !s.equalChar(',') { + return errors.ErrExpected("comma after object value", s.totalOffset()) + } + s.cursor++ + } +} + +func (d *mapDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + buf := ctx.Buf + depth++ + if depth > maxDecodeNestingDepth { + return 0, errors.ErrExceededMaxDepth(buf[cursor], cursor) + } + + cursor = skipWhiteSpace(buf, cursor) + buflen := int64(len(buf)) + if buflen < 2 { + return 0, errors.ErrExpected("{} for map", cursor) + } + switch buf[cursor] { + case 'n': + if err := validateNull(buf, cursor); err != nil { + return 0, err + } + cursor += 4 + **(**unsafe.Pointer)(unsafe.Pointer(&p)) = nil + return cursor, nil + case '{': + default: + return 0, errors.ErrExpected("{ character for map value", cursor) + } + cursor++ + cursor = skipWhiteSpace(buf, cursor) + mapValue := *(*unsafe.Pointer)(p) + if mapValue == nil { + mapValue = makemap(d.mapType, 0) + } + if buf[cursor] == '}' { + **(**unsafe.Pointer)(unsafe.Pointer(&p)) = mapValue + cursor++ + return cursor, nil + } + for { + k := unsafe_New(d.keyType) + keyCursor, err := d.keyDecoder.Decode(ctx, cursor, depth, k) + if err != nil { + return 0, err + } + cursor = skipWhiteSpace(buf, keyCursor) + if buf[cursor] != ':' { + return 0, errors.ErrExpected("colon after object key", cursor) + } + cursor++ + v := unsafe_New(d.valueType) + valueCursor, err := d.valueDecoder.Decode(ctx, cursor, depth, v) + if err != nil { + return 0, err + } + d.mapassign(d.mapType, mapValue, k, v) + cursor = skipWhiteSpace(buf, valueCursor) + if buf[cursor] == '}' { + **(**unsafe.Pointer)(unsafe.Pointer(&p)) = mapValue + cursor++ + return cursor, nil + } + if buf[cursor] != ',' { + return 0, errors.ErrExpected("comma after object value", cursor) + } + cursor++ + } +} + +func (d *mapDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + buf := ctx.Buf + depth++ + if depth > maxDecodeNestingDepth { 
+ return nil, 0, errors.ErrExceededMaxDepth(buf[cursor], cursor) + } + + cursor = skipWhiteSpace(buf, cursor) + buflen := int64(len(buf)) + if buflen < 2 { + return nil, 0, errors.ErrExpected("{} for map", cursor) + } + switch buf[cursor] { + case 'n': + if err := validateNull(buf, cursor); err != nil { + return nil, 0, err + } + cursor += 4 + return [][]byte{nullbytes}, cursor, nil + case '{': + default: + return nil, 0, errors.ErrExpected("{ character for map value", cursor) + } + cursor++ + cursor = skipWhiteSpace(buf, cursor) + if buf[cursor] == '}' { + cursor++ + return nil, cursor, nil + } + keyDecoder, ok := d.keyDecoder.(*stringDecoder) + if !ok { + return nil, 0, &errors.UnmarshalTypeError{ + Value: "string", + Type: reflect.TypeOf(""), + Offset: cursor, + Struct: d.structName, + Field: d.fieldName, + } + } + ret := [][]byte{} + for { + key, keyCursor, err := keyDecoder.decodeByte(buf, cursor) + if err != nil { + return nil, 0, err + } + cursor = skipWhiteSpace(buf, keyCursor) + if buf[cursor] != ':' { + return nil, 0, errors.ErrExpected("colon after object key", cursor) + } + cursor++ + child, found, err := ctx.Option.Path.Field(string(key)) + if err != nil { + return nil, 0, err + } + if found { + if child != nil { + oldPath := ctx.Option.Path.node + ctx.Option.Path.node = child + paths, c, err := d.valueDecoder.DecodePath(ctx, cursor, depth) + if err != nil { + return nil, 0, err + } + ctx.Option.Path.node = oldPath + ret = append(ret, paths...) + cursor = c + } else { + start := cursor + end, err := skipValue(buf, cursor, depth) + if err != nil { + return nil, 0, err + } + ret = append(ret, buf[start:end]) + cursor = end + } + } else { + c, err := skipValue(buf, cursor, depth) + if err != nil { + return nil, 0, err + } + cursor = c + } + cursor = skipWhiteSpace(buf, cursor) + if buf[cursor] == '}' { + cursor++ + return ret, cursor, nil + } + if buf[cursor] != ',' { + return nil, 0, errors.ErrExpected("comma after object value", cursor) + } + cursor++ + } +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/number.go b/vendor/github.com/goccy/go-json/internal/decoder/number.go new file mode 100644 index 0000000000000..10e5435e6ce26 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/number.go @@ -0,0 +1,123 @@ +package decoder + +import ( + "encoding/json" + "strconv" + "unsafe" + + "github.com/goccy/go-json/internal/errors" +) + +type numberDecoder struct { + stringDecoder *stringDecoder + op func(unsafe.Pointer, json.Number) + structName string + fieldName string +} + +func newNumberDecoder(structName, fieldName string, op func(unsafe.Pointer, json.Number)) *numberDecoder { + return &numberDecoder{ + stringDecoder: newStringDecoder(structName, fieldName), + op: op, + structName: structName, + fieldName: fieldName, + } +} + +func (d *numberDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + bytes, err := d.decodeStreamByte(s) + if err != nil { + return err + } + if _, err := strconv.ParseFloat(*(*string)(unsafe.Pointer(&bytes)), 64); err != nil { + return errors.ErrSyntax(err.Error(), s.totalOffset()) + } + d.op(p, json.Number(string(bytes))) + s.reset() + return nil +} + +func (d *numberDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + bytes, c, err := d.decodeByte(ctx.Buf, cursor) + if err != nil { + return 0, err + } + if _, err := strconv.ParseFloat(*(*string)(unsafe.Pointer(&bytes)), 64); err != nil { + return 0, errors.ErrSyntax(err.Error(), c) + } + cursor = c + s := 
*(*string)(unsafe.Pointer(&bytes)) + d.op(p, json.Number(s)) + return cursor, nil +} + +func (d *numberDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + bytes, c, err := d.decodeByte(ctx.Buf, cursor) + if err != nil { + return nil, 0, err + } + if bytes == nil { + return [][]byte{nullbytes}, c, nil + } + return [][]byte{bytes}, c, nil +} + +func (d *numberDecoder) decodeStreamByte(s *Stream) ([]byte, error) { + start := s.cursor + for { + switch s.char() { + case ' ', '\n', '\t', '\r': + s.cursor++ + continue + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return floatBytes(s), nil + case 'n': + if err := nullBytes(s); err != nil { + return nil, err + } + return nil, nil + case '"': + return d.stringDecoder.decodeStreamByte(s) + case nul: + if s.read() { + continue + } + goto ERROR + default: + goto ERROR + } + } +ERROR: + if s.cursor == start { + return nil, errors.ErrInvalidBeginningOfValue(s.char(), s.totalOffset()) + } + return nil, errors.ErrUnexpectedEndOfJSON("json.Number", s.totalOffset()) +} + +func (d *numberDecoder) decodeByte(buf []byte, cursor int64) ([]byte, int64, error) { + for { + switch buf[cursor] { + case ' ', '\n', '\t', '\r': + cursor++ + continue + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + start := cursor + cursor++ + for floatTable[buf[cursor]] { + cursor++ + } + num := buf[start:cursor] + return num, cursor, nil + case 'n': + if err := validateNull(buf, cursor); err != nil { + return nil, 0, err + } + cursor += 4 + return nil, cursor, nil + case '"': + return d.stringDecoder.decodeByte(buf, cursor) + default: + return nil, 0, errors.ErrUnexpectedEndOfJSON("json.Number", cursor) + } + } +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/option.go b/vendor/github.com/goccy/go-json/internal/decoder/option.go new file mode 100644 index 0000000000000..502f772eba0b4 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/option.go @@ -0,0 +1,17 @@ +package decoder + +import "context" + +type OptionFlags uint8 + +const ( + FirstWinOption OptionFlags = 1 << iota + ContextOption + PathOption +) + +type Option struct { + Flags OptionFlags + Context context.Context + Path *Path +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/path.go b/vendor/github.com/goccy/go-json/internal/decoder/path.go new file mode 100644 index 0000000000000..a15ff69e3cd80 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/path.go @@ -0,0 +1,670 @@ +package decoder + +import ( + "fmt" + "reflect" + "strconv" + + "github.com/goccy/go-json/internal/errors" + "github.com/goccy/go-json/internal/runtime" +) + +type PathString string + +func (s PathString) Build() (*Path, error) { + builder := new(PathBuilder) + return builder.Build([]rune(s)) +} + +type PathBuilder struct { + root PathNode + node PathNode + singleQuotePathSelector bool + doubleQuotePathSelector bool +} + +func (b *PathBuilder) Build(buf []rune) (*Path, error) { + node, err := b.build(buf) + if err != nil { + return nil, err + } + return &Path{ + node: node, + RootSelectorOnly: node == nil, + SingleQuotePathSelector: b.singleQuotePathSelector, + DoubleQuotePathSelector: b.doubleQuotePathSelector, + }, nil +} + +func (b *PathBuilder) build(buf []rune) (PathNode, error) { + if len(buf) == 0 { + return nil, errors.ErrEmptyPath() + } + if buf[0] != '$' { + return nil, errors.ErrInvalidPath("JSON Path must start with a $ character") + } + if len(buf) == 1 { + return nil, nil + } + buf = buf[1:] + offset, err := 
b.buildNext(buf) + if err != nil { + return nil, err + } + if len(buf) > offset { + return nil, errors.ErrInvalidPath("remain invalid path %q", buf[offset:]) + } + return b.root, nil +} + +func (b *PathBuilder) buildNextCharIfExists(buf []rune, cursor int) (int, error) { + if len(buf) > cursor { + offset, err := b.buildNext(buf[cursor:]) + if err != nil { + return 0, err + } + return cursor + 1 + offset, nil + } + return cursor, nil +} + +func (b *PathBuilder) buildNext(buf []rune) (int, error) { + switch buf[0] { + case '.': + if len(buf) == 1 { + return 0, errors.ErrInvalidPath("JSON Path ends with dot character") + } + offset, err := b.buildSelector(buf[1:]) + if err != nil { + return 0, err + } + return offset + 1, nil + case '[': + if len(buf) == 1 { + return 0, errors.ErrInvalidPath("JSON Path ends with left bracket character") + } + offset, err := b.buildIndex(buf[1:]) + if err != nil { + return 0, err + } + return offset + 1, nil + default: + return 0, errors.ErrInvalidPath("expect dot or left bracket character. but found %c character", buf[0]) + } +} + +func (b *PathBuilder) buildSelector(buf []rune) (int, error) { + switch buf[0] { + case '.': + if len(buf) == 1 { + return 0, errors.ErrInvalidPath("JSON Path ends with double dot character") + } + offset, err := b.buildPathRecursive(buf[1:]) + if err != nil { + return 0, err + } + return 1 + offset, nil + case '[', ']', '$', '*': + return 0, errors.ErrInvalidPath("found invalid path character %c after dot", buf[0]) + } + for cursor := 0; cursor < len(buf); cursor++ { + switch buf[cursor] { + case '$', '*', ']': + return 0, errors.ErrInvalidPath("found %c character in field selector context", buf[cursor]) + case '.': + if cursor+1 >= len(buf) { + return 0, errors.ErrInvalidPath("JSON Path ends with dot character") + } + selector := buf[:cursor] + b.addSelectorNode(string(selector)) + offset, err := b.buildSelector(buf[cursor+1:]) + if err != nil { + return 0, err + } + return cursor + 1 + offset, nil + case '[': + if cursor+1 >= len(buf) { + return 0, errors.ErrInvalidPath("JSON Path ends with left bracket character") + } + selector := buf[:cursor] + b.addSelectorNode(string(selector)) + offset, err := b.buildIndex(buf[cursor+1:]) + if err != nil { + return 0, err + } + return cursor + 1 + offset, nil + case '"': + if cursor+1 >= len(buf) { + return 0, errors.ErrInvalidPath("JSON Path ends with double quote character") + } + offset, err := b.buildQuoteSelector(buf[cursor+1:], DoubleQuotePathSelector) + if err != nil { + return 0, err + } + return cursor + 1 + offset, nil + } + } + b.addSelectorNode(string(buf)) + return len(buf), nil +} + +func (b *PathBuilder) buildQuoteSelector(buf []rune, sel QuotePathSelector) (int, error) { + switch buf[0] { + case '[', ']', '$', '.', '*', '\'', '"': + return 0, errors.ErrInvalidPath("found invalid path character %c after quote", buf[0]) + } + for cursor := 0; cursor < len(buf); cursor++ { + switch buf[cursor] { + case '\'': + if sel != SingleQuotePathSelector { + return 0, errors.ErrInvalidPath("found double quote character in field selector with single quote context") + } + if len(buf) <= cursor+1 { + return 0, errors.ErrInvalidPath("JSON Path ends with single quote character in field selector context") + } + if buf[cursor+1] != ']' { + return 0, errors.ErrInvalidPath("expect right bracket for field selector with single quote but found %c", buf[cursor+1]) + } + selector := buf[:cursor] + b.addSelectorNode(string(selector)) + b.singleQuotePathSelector = true + return 
b.buildNextCharIfExists(buf, cursor+2) + case '"': + if sel != DoubleQuotePathSelector { + return 0, errors.ErrInvalidPath("found single quote character in field selector with double quote context") + } + selector := buf[:cursor] + b.addSelectorNode(string(selector)) + b.doubleQuotePathSelector = true + return b.buildNextCharIfExists(buf, cursor+1) + } + } + return 0, errors.ErrInvalidPath("couldn't find quote character in selector quote path context") +} + +func (b *PathBuilder) buildPathRecursive(buf []rune) (int, error) { + switch buf[0] { + case '.', '[', ']', '$', '*': + return 0, errors.ErrInvalidPath("found invalid path character %c after double dot", buf[0]) + } + for cursor := 0; cursor < len(buf); cursor++ { + switch buf[cursor] { + case '$', '*', ']': + return 0, errors.ErrInvalidPath("found %c character in field selector context", buf[cursor]) + case '.': + if cursor+1 >= len(buf) { + return 0, errors.ErrInvalidPath("JSON Path ends with dot character") + } + selector := buf[:cursor] + b.addRecursiveNode(string(selector)) + offset, err := b.buildSelector(buf[cursor+1:]) + if err != nil { + return 0, err + } + return cursor + 1 + offset, nil + case '[': + if cursor+1 >= len(buf) { + return 0, errors.ErrInvalidPath("JSON Path ends with left bracket character") + } + selector := buf[:cursor] + b.addRecursiveNode(string(selector)) + offset, err := b.buildIndex(buf[cursor+1:]) + if err != nil { + return 0, err + } + return cursor + 1 + offset, nil + } + } + b.addRecursiveNode(string(buf)) + return len(buf), nil +} + +func (b *PathBuilder) buildIndex(buf []rune) (int, error) { + switch buf[0] { + case '.', '[', ']', '$': + return 0, errors.ErrInvalidPath("found invalid path character %c after left bracket", buf[0]) + case '\'': + if len(buf) == 1 { + return 0, errors.ErrInvalidPath("JSON Path ends with single quote character") + } + offset, err := b.buildQuoteSelector(buf[1:], SingleQuotePathSelector) + if err != nil { + return 0, err + } + return 1 + offset, nil + case '*': + if len(buf) == 1 { + return 0, errors.ErrInvalidPath("JSON Path ends with star character") + } + if buf[1] != ']' { + return 0, errors.ErrInvalidPath("expect right bracket character for index all path but found %c character", buf[1]) + } + b.addIndexAllNode() + offset := len("*]") + if len(buf) > 2 { + buildOffset, err := b.buildNext(buf[2:]) + if err != nil { + return 0, err + } + return offset + buildOffset, nil + } + return offset, nil + } + + for cursor := 0; cursor < len(buf); cursor++ { + switch buf[cursor] { + case ']': + index, err := strconv.ParseInt(string(buf[:cursor]), 10, 64) + if err != nil { + return 0, errors.ErrInvalidPath("%q is unexpected index path", buf[:cursor]) + } + b.addIndexNode(int(index)) + return b.buildNextCharIfExists(buf, cursor+1) + } + } + return 0, errors.ErrInvalidPath("couldn't find right bracket character in index path context") +} + +func (b *PathBuilder) addIndexAllNode() { + node := newPathIndexAllNode() + if b.root == nil { + b.root = node + b.node = node + } else { + b.node = b.node.chain(node) + } +} + +func (b *PathBuilder) addRecursiveNode(selector string) { + node := newPathRecursiveNode(selector) + if b.root == nil { + b.root = node + b.node = node + } else { + b.node = b.node.chain(node) + } +} + +func (b *PathBuilder) addSelectorNode(name string) { + node := newPathSelectorNode(name) + if b.root == nil { + b.root = node + b.node = node + } else { + b.node = b.node.chain(node) + } +} + +func (b *PathBuilder) addIndexNode(idx int) { + node := newPathIndexNode(idx) 
+ if b.root == nil { + b.root = node + b.node = node + } else { + b.node = b.node.chain(node) + } +} + +type QuotePathSelector int + +const ( + SingleQuotePathSelector QuotePathSelector = 1 + DoubleQuotePathSelector QuotePathSelector = 2 +) + +type Path struct { + node PathNode + RootSelectorOnly bool + SingleQuotePathSelector bool + DoubleQuotePathSelector bool +} + +func (p *Path) Field(sel string) (PathNode, bool, error) { + if p.node == nil { + return nil, false, nil + } + return p.node.Field(sel) +} + +func (p *Path) Get(src, dst reflect.Value) error { + if p.node == nil { + return nil + } + return p.node.Get(src, dst) +} + +func (p *Path) String() string { + if p.node == nil { + return "$" + } + return p.node.String() +} + +type PathNode interface { + fmt.Stringer + Index(idx int) (PathNode, bool, error) + Field(fieldName string) (PathNode, bool, error) + Get(src, dst reflect.Value) error + chain(PathNode) PathNode + target() bool + single() bool +} + +type BasePathNode struct { + child PathNode +} + +func (n *BasePathNode) chain(node PathNode) PathNode { + n.child = node + return node +} + +func (n *BasePathNode) target() bool { + return n.child == nil +} + +func (n *BasePathNode) single() bool { + return true +} + +type PathSelectorNode struct { + *BasePathNode + selector string +} + +func newPathSelectorNode(selector string) *PathSelectorNode { + return &PathSelectorNode{ + BasePathNode: &BasePathNode{}, + selector: selector, + } +} + +func (n *PathSelectorNode) Index(idx int) (PathNode, bool, error) { + return nil, false, &errors.PathError{} +} + +func (n *PathSelectorNode) Field(fieldName string) (PathNode, bool, error) { + if n.selector == fieldName { + return n.child, true, nil + } + return nil, false, nil +} + +func (n *PathSelectorNode) Get(src, dst reflect.Value) error { + switch src.Type().Kind() { + case reflect.Map: + iter := src.MapRange() + for iter.Next() { + key, ok := iter.Key().Interface().(string) + if !ok { + return fmt.Errorf("invalid map key type %T", src.Type().Key()) + } + child, found, err := n.Field(key) + if err != nil { + return err + } + if found { + if child != nil { + return child.Get(iter.Value(), dst) + } + return AssignValue(iter.Value(), dst) + } + } + case reflect.Struct: + typ := src.Type() + for i := 0; i < typ.Len(); i++ { + tag := runtime.StructTagFromField(typ.Field(i)) + child, found, err := n.Field(tag.Key) + if err != nil { + return err + } + if found { + if child != nil { + return child.Get(src.Field(i), dst) + } + return AssignValue(src.Field(i), dst) + } + } + case reflect.Ptr: + return n.Get(src.Elem(), dst) + case reflect.Interface: + return n.Get(reflect.ValueOf(src.Interface()), dst) + case reflect.Float64, reflect.String, reflect.Bool: + return AssignValue(src, dst) + } + return fmt.Errorf("failed to get %s value from %s", n.selector, src.Type()) +} + +func (n *PathSelectorNode) String() string { + s := fmt.Sprintf(".%s", n.selector) + if n.child != nil { + s += n.child.String() + } + return s +} + +type PathIndexNode struct { + *BasePathNode + selector int +} + +func newPathIndexNode(selector int) *PathIndexNode { + return &PathIndexNode{ + BasePathNode: &BasePathNode{}, + selector: selector, + } +} + +func (n *PathIndexNode) Index(idx int) (PathNode, bool, error) { + if n.selector == idx { + return n.child, true, nil + } + return nil, false, nil +} + +func (n *PathIndexNode) Field(fieldName string) (PathNode, bool, error) { + return nil, false, &errors.PathError{} +} + +func (n *PathIndexNode) Get(src, dst reflect.Value) error { 
+ switch src.Type().Kind() { + case reflect.Array, reflect.Slice: + if src.Len() > n.selector { + if n.child != nil { + return n.child.Get(src.Index(n.selector), dst) + } + return AssignValue(src.Index(n.selector), dst) + } + case reflect.Ptr: + return n.Get(src.Elem(), dst) + case reflect.Interface: + return n.Get(reflect.ValueOf(src.Interface()), dst) + } + return fmt.Errorf("failed to get [%d] value from %s", n.selector, src.Type()) +} + +func (n *PathIndexNode) String() string { + s := fmt.Sprintf("[%d]", n.selector) + if n.child != nil { + s += n.child.String() + } + return s +} + +type PathIndexAllNode struct { + *BasePathNode +} + +func newPathIndexAllNode() *PathIndexAllNode { + return &PathIndexAllNode{ + BasePathNode: &BasePathNode{}, + } +} + +func (n *PathIndexAllNode) Index(idx int) (PathNode, bool, error) { + return n.child, true, nil +} + +func (n *PathIndexAllNode) Field(fieldName string) (PathNode, bool, error) { + return nil, false, &errors.PathError{} +} + +func (n *PathIndexAllNode) Get(src, dst reflect.Value) error { + switch src.Type().Kind() { + case reflect.Array, reflect.Slice: + var arr []interface{} + for i := 0; i < src.Len(); i++ { + var v interface{} + rv := reflect.ValueOf(&v) + if n.child != nil { + if err := n.child.Get(src.Index(i), rv); err != nil { + return err + } + } else { + if err := AssignValue(src.Index(i), rv); err != nil { + return err + } + } + arr = append(arr, v) + } + if err := AssignValue(reflect.ValueOf(arr), dst); err != nil { + return err + } + return nil + case reflect.Ptr: + return n.Get(src.Elem(), dst) + case reflect.Interface: + return n.Get(reflect.ValueOf(src.Interface()), dst) + } + return fmt.Errorf("failed to get all value from %s", src.Type()) +} + +func (n *PathIndexAllNode) String() string { + s := "[*]" + if n.child != nil { + s += n.child.String() + } + return s +} + +type PathRecursiveNode struct { + *BasePathNode + selector string +} + +func newPathRecursiveNode(selector string) *PathRecursiveNode { + node := newPathSelectorNode(selector) + return &PathRecursiveNode{ + BasePathNode: &BasePathNode{ + child: node, + }, + selector: selector, + } +} + +func (n *PathRecursiveNode) Field(fieldName string) (PathNode, bool, error) { + if n.selector == fieldName { + return n.child, true, nil + } + return nil, false, nil +} + +func (n *PathRecursiveNode) Index(_ int) (PathNode, bool, error) { + return n, true, nil +} + +func valueToSliceValue(v interface{}) []interface{} { + rv := reflect.ValueOf(v) + ret := []interface{}{} + if rv.Type().Kind() == reflect.Slice || rv.Type().Kind() == reflect.Array { + for i := 0; i < rv.Len(); i++ { + ret = append(ret, rv.Index(i).Interface()) + } + return ret + } + return []interface{}{v} +} + +func (n *PathRecursiveNode) Get(src, dst reflect.Value) error { + if n.child == nil { + return fmt.Errorf("failed to get by recursive path ..%s", n.selector) + } + var arr []interface{} + switch src.Type().Kind() { + case reflect.Map: + iter := src.MapRange() + for iter.Next() { + key, ok := iter.Key().Interface().(string) + if !ok { + return fmt.Errorf("invalid map key type %T", src.Type().Key()) + } + child, found, err := n.Field(key) + if err != nil { + return err + } + if found { + var v interface{} + rv := reflect.ValueOf(&v) + _ = child.Get(iter.Value(), rv) + arr = append(arr, valueToSliceValue(v)...) + } else { + var v interface{} + rv := reflect.ValueOf(&v) + _ = n.Get(iter.Value(), rv) + if v != nil { + arr = append(arr, valueToSliceValue(v)...) 
+ } + } + } + _ = AssignValue(reflect.ValueOf(arr), dst) + return nil + case reflect.Struct: + typ := src.Type() + for i := 0; i < typ.Len(); i++ { + tag := runtime.StructTagFromField(typ.Field(i)) + child, found, err := n.Field(tag.Key) + if err != nil { + return err + } + if found { + var v interface{} + rv := reflect.ValueOf(&v) + _ = child.Get(src.Field(i), rv) + arr = append(arr, valueToSliceValue(v)...) + } else { + var v interface{} + rv := reflect.ValueOf(&v) + _ = n.Get(src.Field(i), rv) + if v != nil { + arr = append(arr, valueToSliceValue(v)...) + } + } + } + _ = AssignValue(reflect.ValueOf(arr), dst) + return nil + case reflect.Array, reflect.Slice: + for i := 0; i < src.Len(); i++ { + var v interface{} + rv := reflect.ValueOf(&v) + _ = n.Get(src.Index(i), rv) + if v != nil { + arr = append(arr, valueToSliceValue(v)...) + } + } + _ = AssignValue(reflect.ValueOf(arr), dst) + return nil + case reflect.Ptr: + return n.Get(src.Elem(), dst) + case reflect.Interface: + return n.Get(reflect.ValueOf(src.Interface()), dst) + } + return fmt.Errorf("failed to get %s value from %s", n.selector, src.Type()) +} + +func (n *PathRecursiveNode) String() string { + s := fmt.Sprintf("..%s", n.selector) + if n.child != nil { + s += n.child.String() + } + return s +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/ptr.go b/vendor/github.com/goccy/go-json/internal/decoder/ptr.go new file mode 100644 index 0000000000000..ae2299466af5b --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/ptr.go @@ -0,0 +1,97 @@ +package decoder + +import ( + "fmt" + "unsafe" + + "github.com/goccy/go-json/internal/runtime" +) + +type ptrDecoder struct { + dec Decoder + typ *runtime.Type + structName string + fieldName string +} + +func newPtrDecoder(dec Decoder, typ *runtime.Type, structName, fieldName string) *ptrDecoder { + return &ptrDecoder{ + dec: dec, + typ: typ, + structName: structName, + fieldName: fieldName, + } +} + +func (d *ptrDecoder) contentDecoder() Decoder { + dec, ok := d.dec.(*ptrDecoder) + if !ok { + return d.dec + } + return dec.contentDecoder() +} + +//nolint:golint +//go:linkname unsafe_New reflect.unsafe_New +func unsafe_New(*runtime.Type) unsafe.Pointer + +func UnsafeNew(t *runtime.Type) unsafe.Pointer { + return unsafe_New(t) +} + +func (d *ptrDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + if s.skipWhiteSpace() == nul { + s.read() + } + if s.char() == 'n' { + if err := nullBytes(s); err != nil { + return err + } + *(*unsafe.Pointer)(p) = nil + return nil + } + var newptr unsafe.Pointer + if *(*unsafe.Pointer)(p) == nil { + newptr = unsafe_New(d.typ) + *(*unsafe.Pointer)(p) = newptr + } else { + newptr = *(*unsafe.Pointer)(p) + } + if err := d.dec.DecodeStream(s, depth, newptr); err != nil { + return err + } + return nil +} + +func (d *ptrDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + buf := ctx.Buf + cursor = skipWhiteSpace(buf, cursor) + if buf[cursor] == 'n' { + if err := validateNull(buf, cursor); err != nil { + return 0, err + } + if p != nil { + *(*unsafe.Pointer)(p) = nil + } + cursor += 4 + return cursor, nil + } + var newptr unsafe.Pointer + if *(*unsafe.Pointer)(p) == nil { + newptr = unsafe_New(d.typ) + *(*unsafe.Pointer)(p) = newptr + } else { + newptr = *(*unsafe.Pointer)(p) + } + c, err := d.dec.Decode(ctx, cursor, depth, newptr) + if err != nil { + *(*unsafe.Pointer)(p) = nil + return 0, err + } + cursor = c + return cursor, nil +} + +func (d *ptrDecoder) DecodePath(ctx 
*RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + return nil, 0, fmt.Errorf("json: ptr decoder does not support decode path") +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/slice.go b/vendor/github.com/goccy/go-json/internal/decoder/slice.go new file mode 100644 index 0000000000000..30a23e4b51ec0 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/slice.go @@ -0,0 +1,380 @@ +package decoder + +import ( + "reflect" + "sync" + "unsafe" + + "github.com/goccy/go-json/internal/errors" + "github.com/goccy/go-json/internal/runtime" +) + +var ( + sliceType = runtime.Type2RType( + reflect.TypeOf((*sliceHeader)(nil)).Elem(), + ) + nilSlice = unsafe.Pointer(&sliceHeader{}) +) + +type sliceDecoder struct { + elemType *runtime.Type + isElemPointerType bool + valueDecoder Decoder + size uintptr + arrayPool sync.Pool + structName string + fieldName string +} + +// If use reflect.SliceHeader, data type is uintptr. +// In this case, Go compiler cannot trace reference created by newArray(). +// So, define using unsafe.Pointer as data type +type sliceHeader struct { + data unsafe.Pointer + len int + cap int +} + +const ( + defaultSliceCapacity = 2 +) + +func newSliceDecoder(dec Decoder, elemType *runtime.Type, size uintptr, structName, fieldName string) *sliceDecoder { + return &sliceDecoder{ + valueDecoder: dec, + elemType: elemType, + isElemPointerType: elemType.Kind() == reflect.Ptr || elemType.Kind() == reflect.Map, + size: size, + arrayPool: sync.Pool{ + New: func() interface{} { + return &sliceHeader{ + data: newArray(elemType, defaultSliceCapacity), + len: 0, + cap: defaultSliceCapacity, + } + }, + }, + structName: structName, + fieldName: fieldName, + } +} + +func (d *sliceDecoder) newSlice(src *sliceHeader) *sliceHeader { + slice := d.arrayPool.Get().(*sliceHeader) + if src.len > 0 { + // copy original elem + if slice.cap < src.cap { + data := newArray(d.elemType, src.cap) + slice = &sliceHeader{data: data, len: src.len, cap: src.cap} + } else { + slice.len = src.len + } + copySlice(d.elemType, *slice, *src) + } else { + slice.len = 0 + } + return slice +} + +func (d *sliceDecoder) releaseSlice(p *sliceHeader) { + d.arrayPool.Put(p) +} + +//go:linkname copySlice reflect.typedslicecopy +func copySlice(elemType *runtime.Type, dst, src sliceHeader) int + +//go:linkname newArray reflect.unsafe_NewArray +func newArray(*runtime.Type, int) unsafe.Pointer + +//go:linkname typedmemmove reflect.typedmemmove +func typedmemmove(t *runtime.Type, dst, src unsafe.Pointer) + +func (d *sliceDecoder) errNumber(offset int64) *errors.UnmarshalTypeError { + return &errors.UnmarshalTypeError{ + Value: "number", + Type: reflect.SliceOf(runtime.RType2Type(d.elemType)), + Struct: d.structName, + Field: d.fieldName, + Offset: offset, + } +} + +func (d *sliceDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + depth++ + if depth > maxDecodeNestingDepth { + return errors.ErrExceededMaxDepth(s.char(), s.cursor) + } + + for { + switch s.char() { + case ' ', '\n', '\t', '\r': + s.cursor++ + continue + case 'n': + if err := nullBytes(s); err != nil { + return err + } + typedmemmove(sliceType, p, nilSlice) + return nil + case '[': + s.cursor++ + if s.skipWhiteSpace() == ']' { + dst := (*sliceHeader)(p) + if dst.data == nil { + dst.data = newArray(d.elemType, 0) + } else { + dst.len = 0 + } + s.cursor++ + return nil + } + idx := 0 + slice := d.newSlice((*sliceHeader)(p)) + srcLen := slice.len + capacity := slice.cap + data := slice.data + for { + if capacity <= 
idx { + src := sliceHeader{data: data, len: idx, cap: capacity} + capacity *= 2 + data = newArray(d.elemType, capacity) + dst := sliceHeader{data: data, len: idx, cap: capacity} + copySlice(d.elemType, dst, src) + } + ep := unsafe.Pointer(uintptr(data) + uintptr(idx)*d.size) + + // if srcLen is greater than idx, keep the original reference + if srcLen <= idx { + if d.isElemPointerType { + **(**unsafe.Pointer)(unsafe.Pointer(&ep)) = nil // initialize elem pointer + } else { + // assign new element to the slice + typedmemmove(d.elemType, ep, unsafe_New(d.elemType)) + } + } + + if err := d.valueDecoder.DecodeStream(s, depth, ep); err != nil { + return err + } + s.skipWhiteSpace() + RETRY: + switch s.char() { + case ']': + slice.cap = capacity + slice.len = idx + 1 + slice.data = data + dst := (*sliceHeader)(p) + dst.len = idx + 1 + if dst.len > dst.cap { + dst.data = newArray(d.elemType, dst.len) + dst.cap = dst.len + } + copySlice(d.elemType, *dst, *slice) + d.releaseSlice(slice) + s.cursor++ + return nil + case ',': + idx++ + case nul: + if s.read() { + goto RETRY + } + slice.cap = capacity + slice.data = data + d.releaseSlice(slice) + goto ERROR + default: + slice.cap = capacity + slice.data = data + d.releaseSlice(slice) + goto ERROR + } + s.cursor++ + } + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return d.errNumber(s.totalOffset()) + case nul: + if s.read() { + continue + } + goto ERROR + default: + goto ERROR + } + } +ERROR: + return errors.ErrUnexpectedEndOfJSON("slice", s.totalOffset()) +} + +func (d *sliceDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + buf := ctx.Buf + depth++ + if depth > maxDecodeNestingDepth { + return 0, errors.ErrExceededMaxDepth(buf[cursor], cursor) + } + + for { + switch buf[cursor] { + case ' ', '\n', '\t', '\r': + cursor++ + continue + case 'n': + if err := validateNull(buf, cursor); err != nil { + return 0, err + } + cursor += 4 + typedmemmove(sliceType, p, nilSlice) + return cursor, nil + case '[': + cursor++ + cursor = skipWhiteSpace(buf, cursor) + if buf[cursor] == ']' { + dst := (*sliceHeader)(p) + if dst.data == nil { + dst.data = newArray(d.elemType, 0) + } else { + dst.len = 0 + } + cursor++ + return cursor, nil + } + idx := 0 + slice := d.newSlice((*sliceHeader)(p)) + srcLen := slice.len + capacity := slice.cap + data := slice.data + for { + if capacity <= idx { + src := sliceHeader{data: data, len: idx, cap: capacity} + capacity *= 2 + data = newArray(d.elemType, capacity) + dst := sliceHeader{data: data, len: idx, cap: capacity} + copySlice(d.elemType, dst, src) + } + ep := unsafe.Pointer(uintptr(data) + uintptr(idx)*d.size) + // if srcLen is greater than idx, keep the original reference + if srcLen <= idx { + if d.isElemPointerType { + **(**unsafe.Pointer)(unsafe.Pointer(&ep)) = nil // initialize elem pointer + } else { + // assign new element to the slice + typedmemmove(d.elemType, ep, unsafe_New(d.elemType)) + } + } + c, err := d.valueDecoder.Decode(ctx, cursor, depth, ep) + if err != nil { + return 0, err + } + cursor = c + cursor = skipWhiteSpace(buf, cursor) + switch buf[cursor] { + case ']': + slice.cap = capacity + slice.len = idx + 1 + slice.data = data + dst := (*sliceHeader)(p) + dst.len = idx + 1 + if dst.len > dst.cap { + dst.data = newArray(d.elemType, dst.len) + dst.cap = dst.len + } + copySlice(d.elemType, *dst, *slice) + d.releaseSlice(slice) + cursor++ + return cursor, nil + case ',': + idx++ + default: + slice.cap = capacity + slice.data = data + 
d.releaseSlice(slice) + return 0, errors.ErrInvalidCharacter(buf[cursor], "slice", cursor) + } + cursor++ + } + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return 0, d.errNumber(cursor) + default: + return 0, errors.ErrUnexpectedEndOfJSON("slice", cursor) + } + } +} + +func (d *sliceDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + buf := ctx.Buf + depth++ + if depth > maxDecodeNestingDepth { + return nil, 0, errors.ErrExceededMaxDepth(buf[cursor], cursor) + } + + ret := [][]byte{} + for { + switch buf[cursor] { + case ' ', '\n', '\t', '\r': + cursor++ + continue + case 'n': + if err := validateNull(buf, cursor); err != nil { + return nil, 0, err + } + cursor += 4 + return [][]byte{nullbytes}, cursor, nil + case '[': + cursor++ + cursor = skipWhiteSpace(buf, cursor) + if buf[cursor] == ']' { + cursor++ + return ret, cursor, nil + } + idx := 0 + for { + child, found, err := ctx.Option.Path.node.Index(idx) + if err != nil { + return nil, 0, err + } + if found { + if child != nil { + oldPath := ctx.Option.Path.node + ctx.Option.Path.node = child + paths, c, err := d.valueDecoder.DecodePath(ctx, cursor, depth) + if err != nil { + return nil, 0, err + } + ctx.Option.Path.node = oldPath + ret = append(ret, paths...) + cursor = c + } else { + start := cursor + end, err := skipValue(buf, cursor, depth) + if err != nil { + return nil, 0, err + } + ret = append(ret, buf[start:end]) + cursor = end + } + } else { + c, err := skipValue(buf, cursor, depth) + if err != nil { + return nil, 0, err + } + cursor = c + } + cursor = skipWhiteSpace(buf, cursor) + switch buf[cursor] { + case ']': + cursor++ + return ret, cursor, nil + case ',': + idx++ + default: + return nil, 0, errors.ErrInvalidCharacter(buf[cursor], "slice", cursor) + } + cursor++ + } + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return nil, 0, d.errNumber(cursor) + default: + return nil, 0, errors.ErrUnexpectedEndOfJSON("slice", cursor) + } + } +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/stream.go b/vendor/github.com/goccy/go-json/internal/decoder/stream.go new file mode 100644 index 0000000000000..a383f72596946 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/stream.go @@ -0,0 +1,556 @@ +package decoder + +import ( + "bytes" + "encoding/json" + "io" + "strconv" + "unsafe" + + "github.com/goccy/go-json/internal/errors" +) + +const ( + initBufSize = 512 +) + +type Stream struct { + buf []byte + bufSize int64 + length int64 + r io.Reader + offset int64 + cursor int64 + filledBuffer bool + allRead bool + UseNumber bool + DisallowUnknownFields bool + Option *Option +} + +func NewStream(r io.Reader) *Stream { + return &Stream{ + r: r, + bufSize: initBufSize, + buf: make([]byte, initBufSize), + Option: &Option{}, + } +} + +func (s *Stream) TotalOffset() int64 { + return s.totalOffset() +} + +func (s *Stream) Buffered() io.Reader { + buflen := int64(len(s.buf)) + for i := s.cursor; i < buflen; i++ { + if s.buf[i] == nul { + return bytes.NewReader(s.buf[s.cursor:i]) + } + } + return bytes.NewReader(s.buf[s.cursor:]) +} + +func (s *Stream) PrepareForDecode() error { + for { + switch s.char() { + case ' ', '\t', '\r', '\n': + s.cursor++ + continue + case ',', ':': + s.cursor++ + return nil + case nul: + if s.read() { + continue + } + return io.EOF + } + break + } + return nil +} + +func (s *Stream) totalOffset() int64 { + return s.offset + s.cursor +} + +func (s *Stream) char() byte { + return s.buf[s.cursor] +} + +func (s *Stream) 
equalChar(c byte) bool { + cur := s.buf[s.cursor] + if cur == nul { + s.read() + cur = s.buf[s.cursor] + } + return cur == c +} + +func (s *Stream) stat() ([]byte, int64, unsafe.Pointer) { + return s.buf, s.cursor, (*sliceHeader)(unsafe.Pointer(&s.buf)).data +} + +func (s *Stream) bufptr() unsafe.Pointer { + return (*sliceHeader)(unsafe.Pointer(&s.buf)).data +} + +func (s *Stream) statForRetry() ([]byte, int64, unsafe.Pointer) { + s.cursor-- // for retry ( because caller progress cursor position in each loop ) + return s.buf, s.cursor, (*sliceHeader)(unsafe.Pointer(&s.buf)).data +} + +func (s *Stream) Reset() { + s.reset() + s.bufSize = int64(len(s.buf)) +} + +func (s *Stream) More() bool { + for { + switch s.char() { + case ' ', '\n', '\r', '\t': + s.cursor++ + continue + case '}', ']': + return false + case nul: + if s.read() { + continue + } + return false + } + break + } + return true +} + +func (s *Stream) Token() (interface{}, error) { + for { + c := s.char() + switch c { + case ' ', '\n', '\r', '\t': + s.cursor++ + case '{', '[', ']', '}': + s.cursor++ + return json.Delim(c), nil + case ',', ':': + s.cursor++ + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + bytes := floatBytes(s) + str := *(*string)(unsafe.Pointer(&bytes)) + if s.UseNumber { + return json.Number(str), nil + } + f64, err := strconv.ParseFloat(str, 64) + if err != nil { + return nil, err + } + return f64, nil + case '"': + bytes, err := stringBytes(s) + if err != nil { + return nil, err + } + return string(bytes), nil + case 't': + if err := trueBytes(s); err != nil { + return nil, err + } + return true, nil + case 'f': + if err := falseBytes(s); err != nil { + return nil, err + } + return false, nil + case 'n': + if err := nullBytes(s); err != nil { + return nil, err + } + return nil, nil + case nul: + if s.read() { + continue + } + goto END + default: + return nil, errors.ErrInvalidCharacter(s.char(), "token", s.totalOffset()) + } + } +END: + return nil, io.EOF +} + +func (s *Stream) reset() { + s.offset += s.cursor + s.buf = s.buf[s.cursor:] + s.length -= s.cursor + s.cursor = 0 +} + +func (s *Stream) readBuf() []byte { + if s.filledBuffer { + s.bufSize *= 2 + remainBuf := s.buf + s.buf = make([]byte, s.bufSize) + copy(s.buf, remainBuf) + } + remainLen := s.length - s.cursor + remainNotNulCharNum := int64(0) + for i := int64(0); i < remainLen; i++ { + if s.buf[s.cursor+i] == nul { + break + } + remainNotNulCharNum++ + } + s.length = s.cursor + remainNotNulCharNum + return s.buf[s.cursor+remainNotNulCharNum:] +} + +func (s *Stream) read() bool { + if s.allRead { + return false + } + buf := s.readBuf() + last := len(buf) - 1 + buf[last] = nul + n, err := s.r.Read(buf[:last]) + s.length += int64(n) + if n == last { + s.filledBuffer = true + } else { + s.filledBuffer = false + } + if err == io.EOF { + s.allRead = true + } else if err != nil { + return false + } + return true +} + +func (s *Stream) skipWhiteSpace() byte { + p := s.bufptr() +LOOP: + c := char(p, s.cursor) + switch c { + case ' ', '\n', '\t', '\r': + s.cursor++ + goto LOOP + case nul: + if s.read() { + p = s.bufptr() + goto LOOP + } + } + return c +} + +func (s *Stream) skipObject(depth int64) error { + braceCount := 1 + _, cursor, p := s.stat() + for { + switch char(p, cursor) { + case '{': + braceCount++ + depth++ + if depth > maxDecodeNestingDepth { + return errors.ErrExceededMaxDepth(s.char(), s.cursor) + } + case '}': + braceCount-- + depth-- + if braceCount == 0 { + s.cursor = cursor + 1 + return nil + } + case '[': + depth++ + if 
depth > maxDecodeNestingDepth { + return errors.ErrExceededMaxDepth(s.char(), s.cursor) + } + case ']': + depth-- + case '"': + for { + cursor++ + switch char(p, cursor) { + case '\\': + cursor++ + if char(p, cursor) == nul { + s.cursor = cursor + if s.read() { + _, cursor, p = s.stat() + continue + } + return errors.ErrUnexpectedEndOfJSON("string of object", cursor) + } + case '"': + goto SWITCH_OUT + case nul: + s.cursor = cursor + if s.read() { + _, cursor, p = s.statForRetry() + continue + } + return errors.ErrUnexpectedEndOfJSON("string of object", cursor) + } + } + case nul: + s.cursor = cursor + if s.read() { + _, cursor, p = s.stat() + continue + } + return errors.ErrUnexpectedEndOfJSON("object of object", cursor) + } + SWITCH_OUT: + cursor++ + } +} + +func (s *Stream) skipArray(depth int64) error { + bracketCount := 1 + _, cursor, p := s.stat() + for { + switch char(p, cursor) { + case '[': + bracketCount++ + depth++ + if depth > maxDecodeNestingDepth { + return errors.ErrExceededMaxDepth(s.char(), s.cursor) + } + case ']': + bracketCount-- + depth-- + if bracketCount == 0 { + s.cursor = cursor + 1 + return nil + } + case '{': + depth++ + if depth > maxDecodeNestingDepth { + return errors.ErrExceededMaxDepth(s.char(), s.cursor) + } + case '}': + depth-- + case '"': + for { + cursor++ + switch char(p, cursor) { + case '\\': + cursor++ + if char(p, cursor) == nul { + s.cursor = cursor + if s.read() { + _, cursor, p = s.stat() + continue + } + return errors.ErrUnexpectedEndOfJSON("string of object", cursor) + } + case '"': + goto SWITCH_OUT + case nul: + s.cursor = cursor + if s.read() { + _, cursor, p = s.statForRetry() + continue + } + return errors.ErrUnexpectedEndOfJSON("string of object", cursor) + } + } + case nul: + s.cursor = cursor + if s.read() { + _, cursor, p = s.stat() + continue + } + return errors.ErrUnexpectedEndOfJSON("array of object", cursor) + } + SWITCH_OUT: + cursor++ + } +} + +func (s *Stream) skipValue(depth int64) error { + _, cursor, p := s.stat() + for { + switch char(p, cursor) { + case ' ', '\n', '\t', '\r': + cursor++ + continue + case nul: + s.cursor = cursor + if s.read() { + _, cursor, p = s.stat() + continue + } + return errors.ErrUnexpectedEndOfJSON("value of object", s.totalOffset()) + case '{': + s.cursor = cursor + 1 + return s.skipObject(depth + 1) + case '[': + s.cursor = cursor + 1 + return s.skipArray(depth + 1) + case '"': + for { + cursor++ + switch char(p, cursor) { + case '\\': + cursor++ + if char(p, cursor) == nul { + s.cursor = cursor + if s.read() { + _, cursor, p = s.stat() + continue + } + return errors.ErrUnexpectedEndOfJSON("value of string", s.totalOffset()) + } + case '"': + s.cursor = cursor + 1 + return nil + case nul: + s.cursor = cursor + if s.read() { + _, cursor, p = s.statForRetry() + continue + } + return errors.ErrUnexpectedEndOfJSON("value of string", s.totalOffset()) + } + } + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + for { + cursor++ + c := char(p, cursor) + if floatTable[c] { + continue + } else if c == nul { + if s.read() { + _, cursor, p = s.stat() + continue + } + } + s.cursor = cursor + return nil + } + case 't': + s.cursor = cursor + if err := trueBytes(s); err != nil { + return err + } + return nil + case 'f': + s.cursor = cursor + if err := falseBytes(s); err != nil { + return err + } + return nil + case 'n': + s.cursor = cursor + if err := nullBytes(s); err != nil { + return err + } + return nil + } + cursor++ + } +} + +func nullBytes(s *Stream) error { + // current cursor's character is 
'n' + s.cursor++ + if s.char() != 'u' { + if err := retryReadNull(s); err != nil { + return err + } + } + s.cursor++ + if s.char() != 'l' { + if err := retryReadNull(s); err != nil { + return err + } + } + s.cursor++ + if s.char() != 'l' { + if err := retryReadNull(s); err != nil { + return err + } + } + s.cursor++ + return nil +} + +func retryReadNull(s *Stream) error { + if s.char() == nul && s.read() { + return nil + } + return errors.ErrInvalidCharacter(s.char(), "null", s.totalOffset()) +} + +func trueBytes(s *Stream) error { + // current cursor's character is 't' + s.cursor++ + if s.char() != 'r' { + if err := retryReadTrue(s); err != nil { + return err + } + } + s.cursor++ + if s.char() != 'u' { + if err := retryReadTrue(s); err != nil { + return err + } + } + s.cursor++ + if s.char() != 'e' { + if err := retryReadTrue(s); err != nil { + return err + } + } + s.cursor++ + return nil +} + +func retryReadTrue(s *Stream) error { + if s.char() == nul && s.read() { + return nil + } + return errors.ErrInvalidCharacter(s.char(), "bool(true)", s.totalOffset()) +} + +func falseBytes(s *Stream) error { + // current cursor's character is 'f' + s.cursor++ + if s.char() != 'a' { + if err := retryReadFalse(s); err != nil { + return err + } + } + s.cursor++ + if s.char() != 'l' { + if err := retryReadFalse(s); err != nil { + return err + } + } + s.cursor++ + if s.char() != 's' { + if err := retryReadFalse(s); err != nil { + return err + } + } + s.cursor++ + if s.char() != 'e' { + if err := retryReadFalse(s); err != nil { + return err + } + } + s.cursor++ + return nil +} + +func retryReadFalse(s *Stream) error { + if s.char() == nul && s.read() { + return nil + } + return errors.ErrInvalidCharacter(s.char(), "bool(false)", s.totalOffset()) +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/string.go b/vendor/github.com/goccy/go-json/internal/decoder/string.go new file mode 100644 index 0000000000000..32602c908ae4a --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/string.go @@ -0,0 +1,452 @@ +package decoder + +import ( + "bytes" + "fmt" + "reflect" + "unicode" + "unicode/utf16" + "unicode/utf8" + "unsafe" + + "github.com/goccy/go-json/internal/errors" +) + +type stringDecoder struct { + structName string + fieldName string +} + +func newStringDecoder(structName, fieldName string) *stringDecoder { + return &stringDecoder{ + structName: structName, + fieldName: fieldName, + } +} + +func (d *stringDecoder) errUnmarshalType(typeName string, offset int64) *errors.UnmarshalTypeError { + return &errors.UnmarshalTypeError{ + Value: typeName, + Type: reflect.TypeOf(""), + Offset: offset, + Struct: d.structName, + Field: d.fieldName, + } +} + +func (d *stringDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + bytes, err := d.decodeStreamByte(s) + if err != nil { + return err + } + if bytes == nil { + return nil + } + **(**string)(unsafe.Pointer(&p)) = *(*string)(unsafe.Pointer(&bytes)) + s.reset() + return nil +} + +func (d *stringDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + bytes, c, err := d.decodeByte(ctx.Buf, cursor) + if err != nil { + return 0, err + } + if bytes == nil { + return c, nil + } + cursor = c + **(**string)(unsafe.Pointer(&p)) = *(*string)(unsafe.Pointer(&bytes)) + return cursor, nil +} + +func (d *stringDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + bytes, c, err := d.decodeByte(ctx.Buf, cursor) + if err != nil { + return nil, 0, err + } + if 
bytes == nil { + return [][]byte{nullbytes}, c, nil + } + return [][]byte{bytes}, c, nil +} + +var ( + hexToInt = [256]int{ + '0': 0, + '1': 1, + '2': 2, + '3': 3, + '4': 4, + '5': 5, + '6': 6, + '7': 7, + '8': 8, + '9': 9, + 'A': 10, + 'B': 11, + 'C': 12, + 'D': 13, + 'E': 14, + 'F': 15, + 'a': 10, + 'b': 11, + 'c': 12, + 'd': 13, + 'e': 14, + 'f': 15, + } +) + +func unicodeToRune(code []byte) rune { + var r rune + for i := 0; i < len(code); i++ { + r = r*16 + rune(hexToInt[code[i]]) + } + return r +} + +func readAtLeast(s *Stream, n int64, p *unsafe.Pointer) bool { + for s.cursor+n >= s.length { + if !s.read() { + return false + } + *p = s.bufptr() + } + return true +} + +func decodeUnicodeRune(s *Stream, p unsafe.Pointer) (rune, int64, unsafe.Pointer, error) { + const defaultOffset = 5 + const surrogateOffset = 11 + + if !readAtLeast(s, defaultOffset, &p) { + return rune(0), 0, nil, errors.ErrInvalidCharacter(s.char(), "escaped string", s.totalOffset()) + } + + r := unicodeToRune(s.buf[s.cursor+1 : s.cursor+defaultOffset]) + if utf16.IsSurrogate(r) { + if !readAtLeast(s, surrogateOffset, &p) { + return unicode.ReplacementChar, defaultOffset, p, nil + } + if s.buf[s.cursor+defaultOffset] != '\\' || s.buf[s.cursor+defaultOffset+1] != 'u' { + return unicode.ReplacementChar, defaultOffset, p, nil + } + r2 := unicodeToRune(s.buf[s.cursor+defaultOffset+2 : s.cursor+surrogateOffset]) + if r := utf16.DecodeRune(r, r2); r != unicode.ReplacementChar { + return r, surrogateOffset, p, nil + } + } + return r, defaultOffset, p, nil +} + +func decodeUnicode(s *Stream, p unsafe.Pointer) (unsafe.Pointer, error) { + const backSlashAndULen = 2 // length of \u + + r, offset, pp, err := decodeUnicodeRune(s, p) + if err != nil { + return nil, err + } + unicode := []byte(string(r)) + unicodeLen := int64(len(unicode)) + s.buf = append(append(s.buf[:s.cursor-1], unicode...), s.buf[s.cursor+offset:]...) + unicodeOrgLen := offset - 1 + s.length = s.length - (backSlashAndULen + (unicodeOrgLen - unicodeLen)) + s.cursor = s.cursor - backSlashAndULen + unicodeLen + return pp, nil +} + +func decodeEscapeString(s *Stream, p unsafe.Pointer) (unsafe.Pointer, error) { + s.cursor++ +RETRY: + switch s.buf[s.cursor] { + case '"': + s.buf[s.cursor] = '"' + case '\\': + s.buf[s.cursor] = '\\' + case '/': + s.buf[s.cursor] = '/' + case 'b': + s.buf[s.cursor] = '\b' + case 'f': + s.buf[s.cursor] = '\f' + case 'n': + s.buf[s.cursor] = '\n' + case 'r': + s.buf[s.cursor] = '\r' + case 't': + s.buf[s.cursor] = '\t' + case 'u': + return decodeUnicode(s, p) + case nul: + if !s.read() { + return nil, errors.ErrInvalidCharacter(s.char(), "escaped string", s.totalOffset()) + } + p = s.bufptr() + goto RETRY + default: + return nil, errors.ErrUnexpectedEndOfJSON("string", s.totalOffset()) + } + s.buf = append(s.buf[:s.cursor-1], s.buf[s.cursor:]...) + s.length-- + s.cursor-- + p = s.bufptr() + return p, nil +} + +var ( + runeErrBytes = []byte(string(utf8.RuneError)) + runeErrBytesLen = int64(len(runeErrBytes)) +) + +func stringBytes(s *Stream) ([]byte, error) { + _, cursor, p := s.stat() + cursor++ // skip double quote char + start := cursor + for { + switch char(p, cursor) { + case '\\': + s.cursor = cursor + pp, err := decodeEscapeString(s, p) + if err != nil { + return nil, err + } + p = pp + cursor = s.cursor + case '"': + literal := s.buf[start:cursor] + cursor++ + s.cursor = cursor + return literal, nil + case + // 0x00 is nul, 0x5c is '\\', 0x22 is '"' . 
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, // 0x00-0x0F + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, // 0x10-0x1F + 0x20, 0x21 /*0x22,*/, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F, // 0x20-0x2F + 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D, 0x3E, 0x3F, // 0x30-0x3F + 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, // 0x40-0x4F + 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B /*0x5C,*/, 0x5D, 0x5E, 0x5F, // 0x50-0x5F + 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, // 0x60-0x6F + 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x7B, 0x7C, 0x7D, 0x7E, 0x7F: // 0x70-0x7F + // character is ASCII. skip to next char + case + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8A, 0x8B, 0x8C, 0x8D, 0x8E, 0x8F, // 0x80-0x8F + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9A, 0x9B, 0x9C, 0x9D, 0x9E, 0x9F, // 0x90-0x9F + 0xA0, 0xA1, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8, 0xA9, 0xAA, 0xAB, 0xAC, 0xAD, 0xAE, 0xAF, // 0xA0-0xAF + 0xB0, 0xB1, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xBB, 0xBC, 0xBD, 0xBE, 0xBF, // 0xB0-0xBF + 0xC0, 0xC1, // 0xC0-0xC1 + 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF: // 0xF5-0xFE + // character is invalid + s.buf = append(append(append([]byte{}, s.buf[:cursor]...), runeErrBytes...), s.buf[cursor+1:]...) + _, _, p = s.stat() + cursor += runeErrBytesLen + s.length += runeErrBytesLen + continue + case nul: + s.cursor = cursor + if s.read() { + _, cursor, p = s.stat() + continue + } + goto ERROR + case 0xEF: + // RuneError is {0xEF, 0xBF, 0xBD} + if s.buf[cursor+1] == 0xBF && s.buf[cursor+2] == 0xBD { + // found RuneError: skip + cursor += 2 + break + } + fallthrough + default: + // multi bytes character + if !utf8.FullRune(s.buf[cursor : len(s.buf)-1]) { + s.cursor = cursor + if s.read() { + _, cursor, p = s.stat() + continue + } + goto ERROR + } + r, size := utf8.DecodeRune(s.buf[cursor:]) + if r == utf8.RuneError { + s.buf = append(append(append([]byte{}, s.buf[:cursor]...), runeErrBytes...), s.buf[cursor+1:]...) 
+ cursor += runeErrBytesLen + s.length += runeErrBytesLen + _, _, p = s.stat() + } else { + cursor += int64(size) + } + continue + } + cursor++ + } +ERROR: + return nil, errors.ErrUnexpectedEndOfJSON("string", s.totalOffset()) +} + +func (d *stringDecoder) decodeStreamByte(s *Stream) ([]byte, error) { + for { + switch s.char() { + case ' ', '\n', '\t', '\r': + s.cursor++ + continue + case '[': + return nil, d.errUnmarshalType("array", s.totalOffset()) + case '{': + return nil, d.errUnmarshalType("object", s.totalOffset()) + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return nil, d.errUnmarshalType("number", s.totalOffset()) + case '"': + return stringBytes(s) + case 'n': + if err := nullBytes(s); err != nil { + return nil, err + } + return nil, nil + case nul: + if s.read() { + continue + } + } + break + } + return nil, errors.ErrInvalidBeginningOfValue(s.char(), s.totalOffset()) +} + +func (d *stringDecoder) decodeByte(buf []byte, cursor int64) ([]byte, int64, error) { + for { + switch buf[cursor] { + case ' ', '\n', '\t', '\r': + cursor++ + case '[': + return nil, 0, d.errUnmarshalType("array", cursor) + case '{': + return nil, 0, d.errUnmarshalType("object", cursor) + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return nil, 0, d.errUnmarshalType("number", cursor) + case '"': + cursor++ + start := cursor + b := (*sliceHeader)(unsafe.Pointer(&buf)).data + escaped := 0 + for { + switch char(b, cursor) { + case '\\': + escaped++ + cursor++ + switch char(b, cursor) { + case '"', '\\', '/', 'b', 'f', 'n', 'r', 't': + cursor++ + case 'u': + buflen := int64(len(buf)) + if cursor+5 >= buflen { + return nil, 0, errors.ErrUnexpectedEndOfJSON("escaped string", cursor) + } + for i := int64(1); i <= 4; i++ { + c := char(b, cursor+i) + if !(('0' <= c && c <= '9') || ('a' <= c && c <= 'f') || ('A' <= c && c <= 'F')) { + return nil, 0, errors.ErrSyntax(fmt.Sprintf("json: invalid character %c in \\u hexadecimal character escape", c), cursor+i) + } + } + cursor += 5 + default: + return nil, 0, errors.ErrUnexpectedEndOfJSON("escaped string", cursor) + } + continue + case '"': + literal := buf[start:cursor] + if escaped > 0 { + literal = literal[:unescapeString(literal)] + } + cursor++ + return literal, cursor, nil + case nul: + return nil, 0, errors.ErrUnexpectedEndOfJSON("string", cursor) + } + cursor++ + } + case 'n': + if err := validateNull(buf, cursor); err != nil { + return nil, 0, err + } + cursor += 4 + return nil, cursor, nil + default: + return nil, 0, errors.ErrInvalidBeginningOfValue(buf[cursor], cursor) + } + } +} + +var unescapeMap = [256]byte{ + '"': '"', + '\\': '\\', + '/': '/', + 'b': '\b', + 'f': '\f', + 'n': '\n', + 'r': '\r', + 't': '\t', +} + +func unsafeAdd(ptr unsafe.Pointer, offset int) unsafe.Pointer { + return unsafe.Pointer(uintptr(ptr) + uintptr(offset)) +} + +func unescapeString(buf []byte) int { + p := (*sliceHeader)(unsafe.Pointer(&buf)).data + end := unsafeAdd(p, len(buf)) + src := unsafeAdd(p, bytes.IndexByte(buf, '\\')) + dst := src + for src != end { + c := char(src, 0) + if c == '\\' { + escapeChar := char(src, 1) + if escapeChar != 'u' { + *(*byte)(dst) = unescapeMap[escapeChar] + src = unsafeAdd(src, 2) + dst = unsafeAdd(dst, 1) + } else { + v1 := hexToInt[char(src, 2)] + v2 := hexToInt[char(src, 3)] + v3 := hexToInt[char(src, 4)] + v4 := hexToInt[char(src, 5)] + code := rune((v1 << 12) | (v2 << 8) | (v3 << 4) | v4) + if code >= 0xd800 && code < 0xdc00 && uintptr(unsafeAdd(src, 11)) < uintptr(end) { + if char(src, 6) == '\\' && 
char(src, 7) == 'u' { + v1 := hexToInt[char(src, 8)] + v2 := hexToInt[char(src, 9)] + v3 := hexToInt[char(src, 10)] + v4 := hexToInt[char(src, 11)] + lo := rune((v1 << 12) | (v2 << 8) | (v3 << 4) | v4) + if lo >= 0xdc00 && lo < 0xe000 { + code = (code-0xd800)<<10 | (lo - 0xdc00) + 0x10000 + src = unsafeAdd(src, 6) + } + } + } + var b [utf8.UTFMax]byte + n := utf8.EncodeRune(b[:], code) + switch n { + case 4: + *(*byte)(unsafeAdd(dst, 3)) = b[3] + fallthrough + case 3: + *(*byte)(unsafeAdd(dst, 2)) = b[2] + fallthrough + case 2: + *(*byte)(unsafeAdd(dst, 1)) = b[1] + fallthrough + case 1: + *(*byte)(unsafeAdd(dst, 0)) = b[0] + } + src = unsafeAdd(src, 6) + dst = unsafeAdd(dst, n) + } + } else { + *(*byte)(dst) = c + src = unsafeAdd(src, 1) + dst = unsafeAdd(dst, 1) + } + } + return int(uintptr(dst) - uintptr(p)) +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/struct.go b/vendor/github.com/goccy/go-json/internal/decoder/struct.go new file mode 100644 index 0000000000000..313da153b36ee --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/struct.go @@ -0,0 +1,845 @@ +package decoder + +import ( + "fmt" + "math" + "math/bits" + "sort" + "strings" + "unicode" + "unicode/utf16" + "unsafe" + + "github.com/goccy/go-json/internal/errors" +) + +type structFieldSet struct { + dec Decoder + offset uintptr + isTaggedKey bool + fieldIdx int + key string + keyLen int64 + err error +} + +type structDecoder struct { + fieldMap map[string]*structFieldSet + fieldUniqueNameNum int + stringDecoder *stringDecoder + structName string + fieldName string + isTriedOptimize bool + keyBitmapUint8 [][256]uint8 + keyBitmapUint16 [][256]uint16 + sortedFieldSets []*structFieldSet + keyDecoder func(*structDecoder, []byte, int64) (int64, *structFieldSet, error) + keyStreamDecoder func(*structDecoder, *Stream) (*structFieldSet, string, error) +} + +var ( + largeToSmallTable [256]byte +) + +func init() { + for i := 0; i < 256; i++ { + c := i + if 'A' <= c && c <= 'Z' { + c += 'a' - 'A' + } + largeToSmallTable[i] = byte(c) + } +} + +func toASCIILower(s string) string { + b := []byte(s) + for i := range b { + b[i] = largeToSmallTable[b[i]] + } + return string(b) +} + +func newStructDecoder(structName, fieldName string, fieldMap map[string]*structFieldSet) *structDecoder { + return &structDecoder{ + fieldMap: fieldMap, + stringDecoder: newStringDecoder(structName, fieldName), + structName: structName, + fieldName: fieldName, + keyDecoder: decodeKey, + keyStreamDecoder: decodeKeyStream, + } +} + +const ( + allowOptimizeMaxKeyLen = 64 + allowOptimizeMaxFieldLen = 16 +) + +func (d *structDecoder) tryOptimize() { + fieldUniqueNameMap := map[string]int{} + fieldIdx := -1 + for k, v := range d.fieldMap { + lower := strings.ToLower(k) + idx, exists := fieldUniqueNameMap[lower] + if exists { + v.fieldIdx = idx + } else { + fieldIdx++ + v.fieldIdx = fieldIdx + } + fieldUniqueNameMap[lower] = fieldIdx + } + d.fieldUniqueNameNum = len(fieldUniqueNameMap) + + if d.isTriedOptimize { + return + } + fieldMap := map[string]*structFieldSet{} + conflicted := map[string]struct{}{} + for k, v := range d.fieldMap { + key := strings.ToLower(k) + if key != k { + if key != toASCIILower(k) { + d.isTriedOptimize = true + return + } + // already exists same key (e.g. 
Hello and HELLO has same lower case key + if _, exists := conflicted[key]; exists { + d.isTriedOptimize = true + return + } + conflicted[key] = struct{}{} + } + if field, exists := fieldMap[key]; exists { + if field != v { + d.isTriedOptimize = true + return + } + } + fieldMap[key] = v + } + + if len(fieldMap) > allowOptimizeMaxFieldLen { + d.isTriedOptimize = true + return + } + + var maxKeyLen int + sortedKeys := []string{} + for key := range fieldMap { + keyLen := len(key) + if keyLen > allowOptimizeMaxKeyLen { + d.isTriedOptimize = true + return + } + if maxKeyLen < keyLen { + maxKeyLen = keyLen + } + sortedKeys = append(sortedKeys, key) + } + sort.Strings(sortedKeys) + + // By allocating one extra capacity than `maxKeyLen`, + // it is possible to avoid the process of comparing the index of the key with the length of the bitmap each time. + bitmapLen := maxKeyLen + 1 + if len(sortedKeys) <= 8 { + keyBitmap := make([][256]uint8, bitmapLen) + for i, key := range sortedKeys { + for j := 0; j < len(key); j++ { + c := key[j] + keyBitmap[j][c] |= (1 << uint(i)) + } + d.sortedFieldSets = append(d.sortedFieldSets, fieldMap[key]) + } + d.keyBitmapUint8 = keyBitmap + d.keyDecoder = decodeKeyByBitmapUint8 + d.keyStreamDecoder = decodeKeyByBitmapUint8Stream + } else { + keyBitmap := make([][256]uint16, bitmapLen) + for i, key := range sortedKeys { + for j := 0; j < len(key); j++ { + c := key[j] + keyBitmap[j][c] |= (1 << uint(i)) + } + d.sortedFieldSets = append(d.sortedFieldSets, fieldMap[key]) + } + d.keyBitmapUint16 = keyBitmap + d.keyDecoder = decodeKeyByBitmapUint16 + d.keyStreamDecoder = decodeKeyByBitmapUint16Stream + } +} + +// decode from '\uXXXX' +func decodeKeyCharByUnicodeRune(buf []byte, cursor int64) ([]byte, int64, error) { + const defaultOffset = 4 + const surrogateOffset = 6 + + if cursor+defaultOffset >= int64(len(buf)) { + return nil, 0, errors.ErrUnexpectedEndOfJSON("escaped string", cursor) + } + + r := unicodeToRune(buf[cursor : cursor+defaultOffset]) + if utf16.IsSurrogate(r) { + cursor += defaultOffset + if cursor+surrogateOffset >= int64(len(buf)) || buf[cursor] != '\\' || buf[cursor+1] != 'u' { + return []byte(string(unicode.ReplacementChar)), cursor + defaultOffset - 1, nil + } + cursor += 2 + r2 := unicodeToRune(buf[cursor : cursor+defaultOffset]) + if r := utf16.DecodeRune(r, r2); r != unicode.ReplacementChar { + return []byte(string(r)), cursor + defaultOffset - 1, nil + } + } + return []byte(string(r)), cursor + defaultOffset - 1, nil +} + +func decodeKeyCharByEscapedChar(buf []byte, cursor int64) ([]byte, int64, error) { + c := buf[cursor] + cursor++ + switch c { + case '"': + return []byte{'"'}, cursor, nil + case '\\': + return []byte{'\\'}, cursor, nil + case '/': + return []byte{'/'}, cursor, nil + case 'b': + return []byte{'\b'}, cursor, nil + case 'f': + return []byte{'\f'}, cursor, nil + case 'n': + return []byte{'\n'}, cursor, nil + case 'r': + return []byte{'\r'}, cursor, nil + case 't': + return []byte{'\t'}, cursor, nil + case 'u': + return decodeKeyCharByUnicodeRune(buf, cursor) + } + return nil, cursor, nil +} + +func decodeKeyByBitmapUint8(d *structDecoder, buf []byte, cursor int64) (int64, *structFieldSet, error) { + var ( + curBit uint8 = math.MaxUint8 + ) + b := (*sliceHeader)(unsafe.Pointer(&buf)).data + for { + switch char(b, cursor) { + case ' ', '\n', '\t', '\r': + cursor++ + case '"': + cursor++ + c := char(b, cursor) + switch c { + case '"': + cursor++ + return cursor, nil, nil + case nul: + return 0, nil, 
errors.ErrUnexpectedEndOfJSON("string", cursor) + } + keyIdx := 0 + bitmap := d.keyBitmapUint8 + start := cursor + for { + c := char(b, cursor) + switch c { + case '"': + fieldSetIndex := bits.TrailingZeros8(curBit) + field := d.sortedFieldSets[fieldSetIndex] + keyLen := cursor - start + cursor++ + if keyLen < field.keyLen { + // early match + return cursor, nil, nil + } + return cursor, field, nil + case nul: + return 0, nil, errors.ErrUnexpectedEndOfJSON("string", cursor) + case '\\': + cursor++ + chars, nextCursor, err := decodeKeyCharByEscapedChar(buf, cursor) + if err != nil { + return 0, nil, err + } + for _, c := range chars { + curBit &= bitmap[keyIdx][largeToSmallTable[c]] + if curBit == 0 { + return decodeKeyNotFound(b, cursor) + } + keyIdx++ + } + cursor = nextCursor + default: + curBit &= bitmap[keyIdx][largeToSmallTable[c]] + if curBit == 0 { + return decodeKeyNotFound(b, cursor) + } + keyIdx++ + } + cursor++ + } + default: + return cursor, nil, errors.ErrInvalidBeginningOfValue(char(b, cursor), cursor) + } + } +} + +func decodeKeyByBitmapUint16(d *structDecoder, buf []byte, cursor int64) (int64, *structFieldSet, error) { + var ( + curBit uint16 = math.MaxUint16 + ) + b := (*sliceHeader)(unsafe.Pointer(&buf)).data + for { + switch char(b, cursor) { + case ' ', '\n', '\t', '\r': + cursor++ + case '"': + cursor++ + c := char(b, cursor) + switch c { + case '"': + cursor++ + return cursor, nil, nil + case nul: + return 0, nil, errors.ErrUnexpectedEndOfJSON("string", cursor) + } + keyIdx := 0 + bitmap := d.keyBitmapUint16 + start := cursor + for { + c := char(b, cursor) + switch c { + case '"': + fieldSetIndex := bits.TrailingZeros16(curBit) + field := d.sortedFieldSets[fieldSetIndex] + keyLen := cursor - start + cursor++ + if keyLen < field.keyLen { + // early match + return cursor, nil, nil + } + return cursor, field, nil + case nul: + return 0, nil, errors.ErrUnexpectedEndOfJSON("string", cursor) + case '\\': + cursor++ + chars, nextCursor, err := decodeKeyCharByEscapedChar(buf, cursor) + if err != nil { + return 0, nil, err + } + for _, c := range chars { + curBit &= bitmap[keyIdx][largeToSmallTable[c]] + if curBit == 0 { + return decodeKeyNotFound(b, cursor) + } + keyIdx++ + } + cursor = nextCursor + default: + curBit &= bitmap[keyIdx][largeToSmallTable[c]] + if curBit == 0 { + return decodeKeyNotFound(b, cursor) + } + keyIdx++ + } + cursor++ + } + default: + return cursor, nil, errors.ErrInvalidBeginningOfValue(char(b, cursor), cursor) + } + } +} + +func decodeKeyNotFound(b unsafe.Pointer, cursor int64) (int64, *structFieldSet, error) { + for { + cursor++ + switch char(b, cursor) { + case '"': + cursor++ + return cursor, nil, nil + case '\\': + cursor++ + if char(b, cursor) == nul { + return 0, nil, errors.ErrUnexpectedEndOfJSON("string", cursor) + } + case nul: + return 0, nil, errors.ErrUnexpectedEndOfJSON("string", cursor) + } + } +} + +func decodeKey(d *structDecoder, buf []byte, cursor int64) (int64, *structFieldSet, error) { + key, c, err := d.stringDecoder.decodeByte(buf, cursor) + if err != nil { + return 0, nil, err + } + cursor = c + k := *(*string)(unsafe.Pointer(&key)) + field, exists := d.fieldMap[k] + if !exists { + return cursor, nil, nil + } + return cursor, field, nil +} + +func decodeKeyByBitmapUint8Stream(d *structDecoder, s *Stream) (*structFieldSet, string, error) { + var ( + curBit uint8 = math.MaxUint8 + ) + _, cursor, p := s.stat() + for { + switch char(p, cursor) { + case ' ', '\n', '\t', '\r': + cursor++ + case nul: + s.cursor = cursor + if s.read() 
{ + _, cursor, p = s.stat() + continue + } + return nil, "", errors.ErrInvalidBeginningOfValue(char(p, cursor), s.totalOffset()) + case '"': + cursor++ + FIRST_CHAR: + start := cursor + switch char(p, cursor) { + case '"': + cursor++ + s.cursor = cursor + return nil, "", nil + case nul: + s.cursor = cursor + if s.read() { + _, cursor, p = s.stat() + goto FIRST_CHAR + } + return nil, "", errors.ErrUnexpectedEndOfJSON("string", s.totalOffset()) + } + keyIdx := 0 + bitmap := d.keyBitmapUint8 + for { + c := char(p, cursor) + switch c { + case '"': + fieldSetIndex := bits.TrailingZeros8(curBit) + field := d.sortedFieldSets[fieldSetIndex] + keyLen := cursor - start + cursor++ + s.cursor = cursor + if keyLen < field.keyLen { + // early match + return nil, field.key, nil + } + return field, field.key, nil + case nul: + s.cursor = cursor + if s.read() { + _, cursor, p = s.stat() + continue + } + return nil, "", errors.ErrUnexpectedEndOfJSON("string", s.totalOffset()) + case '\\': + s.cursor = cursor + 1 // skip '\' char + chars, err := decodeKeyCharByEscapeCharStream(s) + if err != nil { + return nil, "", err + } + cursor = s.cursor + for _, c := range chars { + curBit &= bitmap[keyIdx][largeToSmallTable[c]] + if curBit == 0 { + s.cursor = cursor + return decodeKeyNotFoundStream(s, start) + } + keyIdx++ + } + default: + curBit &= bitmap[keyIdx][largeToSmallTable[c]] + if curBit == 0 { + s.cursor = cursor + return decodeKeyNotFoundStream(s, start) + } + keyIdx++ + } + cursor++ + } + default: + return nil, "", errors.ErrInvalidBeginningOfValue(char(p, cursor), s.totalOffset()) + } + } +} + +func decodeKeyByBitmapUint16Stream(d *structDecoder, s *Stream) (*structFieldSet, string, error) { + var ( + curBit uint16 = math.MaxUint16 + ) + _, cursor, p := s.stat() + for { + switch char(p, cursor) { + case ' ', '\n', '\t', '\r': + cursor++ + case nul: + s.cursor = cursor + if s.read() { + _, cursor, p = s.stat() + continue + } + return nil, "", errors.ErrInvalidBeginningOfValue(char(p, cursor), s.totalOffset()) + case '"': + cursor++ + FIRST_CHAR: + start := cursor + switch char(p, cursor) { + case '"': + cursor++ + s.cursor = cursor + return nil, "", nil + case nul: + s.cursor = cursor + if s.read() { + _, cursor, p = s.stat() + goto FIRST_CHAR + } + return nil, "", errors.ErrUnexpectedEndOfJSON("string", s.totalOffset()) + } + keyIdx := 0 + bitmap := d.keyBitmapUint16 + for { + c := char(p, cursor) + switch c { + case '"': + fieldSetIndex := bits.TrailingZeros16(curBit) + field := d.sortedFieldSets[fieldSetIndex] + keyLen := cursor - start + cursor++ + s.cursor = cursor + if keyLen < field.keyLen { + // early match + return nil, field.key, nil + } + return field, field.key, nil + case nul: + s.cursor = cursor + if s.read() { + _, cursor, p = s.stat() + continue + } + return nil, "", errors.ErrUnexpectedEndOfJSON("string", s.totalOffset()) + case '\\': + s.cursor = cursor + 1 // skip '\' char + chars, err := decodeKeyCharByEscapeCharStream(s) + if err != nil { + return nil, "", err + } + cursor = s.cursor + for _, c := range chars { + curBit &= bitmap[keyIdx][largeToSmallTable[c]] + if curBit == 0 { + s.cursor = cursor + return decodeKeyNotFoundStream(s, start) + } + keyIdx++ + } + default: + curBit &= bitmap[keyIdx][largeToSmallTable[c]] + if curBit == 0 { + s.cursor = cursor + return decodeKeyNotFoundStream(s, start) + } + keyIdx++ + } + cursor++ + } + default: + return nil, "", errors.ErrInvalidBeginningOfValue(char(p, cursor), s.totalOffset()) + } + } +} + +// decode from '\uXXXX' +func 
decodeKeyCharByUnicodeRuneStream(s *Stream) ([]byte, error) { + const defaultOffset = 4 + const surrogateOffset = 6 + + if s.cursor+defaultOffset >= s.length { + if !s.read() { + return nil, errors.ErrInvalidCharacter(s.char(), "escaped unicode char", s.totalOffset()) + } + } + + r := unicodeToRune(s.buf[s.cursor : s.cursor+defaultOffset]) + if utf16.IsSurrogate(r) { + s.cursor += defaultOffset + if s.cursor+surrogateOffset >= s.length { + s.read() + } + if s.cursor+surrogateOffset >= s.length || s.buf[s.cursor] != '\\' || s.buf[s.cursor+1] != 'u' { + s.cursor += defaultOffset - 1 + return []byte(string(unicode.ReplacementChar)), nil + } + r2 := unicodeToRune(s.buf[s.cursor+defaultOffset+2 : s.cursor+surrogateOffset]) + if r := utf16.DecodeRune(r, r2); r != unicode.ReplacementChar { + s.cursor += defaultOffset - 1 + return []byte(string(r)), nil + } + } + s.cursor += defaultOffset - 1 + return []byte(string(r)), nil +} + +func decodeKeyCharByEscapeCharStream(s *Stream) ([]byte, error) { + c := s.buf[s.cursor] + s.cursor++ +RETRY: + switch c { + case '"': + return []byte{'"'}, nil + case '\\': + return []byte{'\\'}, nil + case '/': + return []byte{'/'}, nil + case 'b': + return []byte{'\b'}, nil + case 'f': + return []byte{'\f'}, nil + case 'n': + return []byte{'\n'}, nil + case 'r': + return []byte{'\r'}, nil + case 't': + return []byte{'\t'}, nil + case 'u': + return decodeKeyCharByUnicodeRuneStream(s) + case nul: + if !s.read() { + return nil, errors.ErrInvalidCharacter(s.char(), "escaped char", s.totalOffset()) + } + goto RETRY + default: + return nil, errors.ErrUnexpectedEndOfJSON("struct field", s.totalOffset()) + } +} + +func decodeKeyNotFoundStream(s *Stream, start int64) (*structFieldSet, string, error) { + buf, cursor, p := s.stat() + for { + cursor++ + switch char(p, cursor) { + case '"': + b := buf[start:cursor] + key := *(*string)(unsafe.Pointer(&b)) + cursor++ + s.cursor = cursor + return nil, key, nil + case '\\': + cursor++ + if char(p, cursor) == nul { + s.cursor = cursor + if !s.read() { + return nil, "", errors.ErrUnexpectedEndOfJSON("string", s.totalOffset()) + } + buf, cursor, p = s.statForRetry() + } + case nul: + s.cursor = cursor + if !s.read() { + return nil, "", errors.ErrUnexpectedEndOfJSON("string", s.totalOffset()) + } + buf, cursor, p = s.statForRetry() + } + } +} + +func decodeKeyStream(d *structDecoder, s *Stream) (*structFieldSet, string, error) { + key, err := d.stringDecoder.decodeStreamByte(s) + if err != nil { + return nil, "", err + } + k := *(*string)(unsafe.Pointer(&key)) + return d.fieldMap[k], k, nil +} + +func (d *structDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + depth++ + if depth > maxDecodeNestingDepth { + return errors.ErrExceededMaxDepth(s.char(), s.cursor) + } + + c := s.skipWhiteSpace() + switch c { + case 'n': + if err := nullBytes(s); err != nil { + return err + } + return nil + default: + if s.char() != '{' { + return errors.ErrInvalidBeginningOfValue(s.char(), s.totalOffset()) + } + } + s.cursor++ + if s.skipWhiteSpace() == '}' { + s.cursor++ + return nil + } + var ( + seenFields map[int]struct{} + seenFieldNum int + ) + firstWin := (s.Option.Flags & FirstWinOption) != 0 + if firstWin { + seenFields = make(map[int]struct{}, d.fieldUniqueNameNum) + } + for { + s.reset() + field, key, err := d.keyStreamDecoder(d, s) + if err != nil { + return err + } + if s.skipWhiteSpace() != ':' { + return errors.ErrExpected("colon after object key", s.totalOffset()) + } + s.cursor++ + if field != nil { + if field.err != nil 
{ + return field.err + } + if firstWin { + if _, exists := seenFields[field.fieldIdx]; exists { + if err := s.skipValue(depth); err != nil { + return err + } + } else { + if err := field.dec.DecodeStream(s, depth, unsafe.Pointer(uintptr(p)+field.offset)); err != nil { + return err + } + seenFieldNum++ + if d.fieldUniqueNameNum <= seenFieldNum { + return s.skipObject(depth) + } + seenFields[field.fieldIdx] = struct{}{} + } + } else { + if err := field.dec.DecodeStream(s, depth, unsafe.Pointer(uintptr(p)+field.offset)); err != nil { + return err + } + } + } else if s.DisallowUnknownFields { + return fmt.Errorf("json: unknown field %q", key) + } else { + if err := s.skipValue(depth); err != nil { + return err + } + } + c := s.skipWhiteSpace() + if c == '}' { + s.cursor++ + return nil + } + if c != ',' { + return errors.ErrExpected("comma after object element", s.totalOffset()) + } + s.cursor++ + } +} + +func (d *structDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + buf := ctx.Buf + depth++ + if depth > maxDecodeNestingDepth { + return 0, errors.ErrExceededMaxDepth(buf[cursor], cursor) + } + buflen := int64(len(buf)) + cursor = skipWhiteSpace(buf, cursor) + b := (*sliceHeader)(unsafe.Pointer(&buf)).data + switch char(b, cursor) { + case 'n': + if err := validateNull(buf, cursor); err != nil { + return 0, err + } + cursor += 4 + return cursor, nil + case '{': + default: + return 0, errors.ErrInvalidBeginningOfValue(char(b, cursor), cursor) + } + cursor++ + cursor = skipWhiteSpace(buf, cursor) + if buf[cursor] == '}' { + cursor++ + return cursor, nil + } + var ( + seenFields map[int]struct{} + seenFieldNum int + ) + firstWin := (ctx.Option.Flags & FirstWinOption) != 0 + if firstWin { + seenFields = make(map[int]struct{}, d.fieldUniqueNameNum) + } + for { + c, field, err := d.keyDecoder(d, buf, cursor) + if err != nil { + return 0, err + } + cursor = skipWhiteSpace(buf, c) + if char(b, cursor) != ':' { + return 0, errors.ErrExpected("colon after object key", cursor) + } + cursor++ + if cursor >= buflen { + return 0, errors.ErrExpected("object value after colon", cursor) + } + if field != nil { + if field.err != nil { + return 0, field.err + } + if firstWin { + if _, exists := seenFields[field.fieldIdx]; exists { + c, err := skipValue(buf, cursor, depth) + if err != nil { + return 0, err + } + cursor = c + } else { + c, err := field.dec.Decode(ctx, cursor, depth, unsafe.Pointer(uintptr(p)+field.offset)) + if err != nil { + return 0, err + } + cursor = c + seenFieldNum++ + if d.fieldUniqueNameNum <= seenFieldNum { + return skipObject(buf, cursor, depth) + } + seenFields[field.fieldIdx] = struct{}{} + } + } else { + c, err := field.dec.Decode(ctx, cursor, depth, unsafe.Pointer(uintptr(p)+field.offset)) + if err != nil { + return 0, err + } + cursor = c + } + } else { + c, err := skipValue(buf, cursor, depth) + if err != nil { + return 0, err + } + cursor = c + } + cursor = skipWhiteSpace(buf, cursor) + if char(b, cursor) == '}' { + cursor++ + return cursor, nil + } + if char(b, cursor) != ',' { + return 0, errors.ErrExpected("comma after object element", cursor) + } + cursor++ + } +} + +func (d *structDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + return nil, 0, fmt.Errorf("json: struct decoder does not support decode path") +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/type.go b/vendor/github.com/goccy/go-json/internal/decoder/type.go new file mode 100644 index 0000000000000..beaf3ab866be1 
--- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/type.go @@ -0,0 +1,30 @@ +package decoder + +import ( + "context" + "encoding" + "encoding/json" + "reflect" + "unsafe" +) + +type Decoder interface { + Decode(*RuntimeContext, int64, int64, unsafe.Pointer) (int64, error) + DecodePath(*RuntimeContext, int64, int64) ([][]byte, int64, error) + DecodeStream(*Stream, int64, unsafe.Pointer) error +} + +const ( + nul = '\000' + maxDecodeNestingDepth = 10000 +) + +type unmarshalerContext interface { + UnmarshalJSON(context.Context, []byte) error +} + +var ( + unmarshalJSONType = reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + unmarshalJSONContextType = reflect.TypeOf((*unmarshalerContext)(nil)).Elem() + unmarshalTextType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() +) diff --git a/vendor/github.com/goccy/go-json/internal/decoder/uint.go b/vendor/github.com/goccy/go-json/internal/decoder/uint.go new file mode 100644 index 0000000000000..4131731b8e4d9 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/uint.go @@ -0,0 +1,194 @@ +package decoder + +import ( + "fmt" + "reflect" + "unsafe" + + "github.com/goccy/go-json/internal/errors" + "github.com/goccy/go-json/internal/runtime" +) + +type uintDecoder struct { + typ *runtime.Type + kind reflect.Kind + op func(unsafe.Pointer, uint64) + structName string + fieldName string +} + +func newUintDecoder(typ *runtime.Type, structName, fieldName string, op func(unsafe.Pointer, uint64)) *uintDecoder { + return &uintDecoder{ + typ: typ, + kind: typ.Kind(), + op: op, + structName: structName, + fieldName: fieldName, + } +} + +func (d *uintDecoder) typeError(buf []byte, offset int64) *errors.UnmarshalTypeError { + return &errors.UnmarshalTypeError{ + Value: fmt.Sprintf("number %s", string(buf)), + Type: runtime.RType2Type(d.typ), + Offset: offset, + } +} + +var ( + pow10u64 = [...]uint64{ + 1e00, 1e01, 1e02, 1e03, 1e04, 1e05, 1e06, 1e07, 1e08, 1e09, + 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19, + } + pow10u64Len = len(pow10u64) +) + +func (d *uintDecoder) parseUint(b []byte) (uint64, error) { + maxDigit := len(b) + if maxDigit > pow10u64Len { + return 0, fmt.Errorf("invalid length of number") + } + sum := uint64(0) + for i := 0; i < maxDigit; i++ { + c := uint64(b[i]) - 48 + digitValue := pow10u64[maxDigit-i-1] + sum += c * digitValue + } + return sum, nil +} + +func (d *uintDecoder) decodeStreamByte(s *Stream) ([]byte, error) { + for { + switch s.char() { + case ' ', '\n', '\t', '\r': + s.cursor++ + continue + case '0': + s.cursor++ + return numZeroBuf, nil + case '1', '2', '3', '4', '5', '6', '7', '8', '9': + start := s.cursor + for { + s.cursor++ + if numTable[s.char()] { + continue + } else if s.char() == nul { + if s.read() { + s.cursor-- // for retry current character + continue + } + } + break + } + num := s.buf[start:s.cursor] + return num, nil + case 'n': + if err := nullBytes(s); err != nil { + return nil, err + } + return nil, nil + case nul: + if s.read() { + continue + } + default: + return nil, d.typeError([]byte{s.char()}, s.totalOffset()) + } + break + } + return nil, errors.ErrUnexpectedEndOfJSON("number(unsigned integer)", s.totalOffset()) +} + +func (d *uintDecoder) decodeByte(buf []byte, cursor int64) ([]byte, int64, error) { + for { + switch buf[cursor] { + case ' ', '\n', '\t', '\r': + cursor++ + continue + case '0': + cursor++ + return numZeroBuf, cursor, nil + case '1', '2', '3', '4', '5', '6', '7', '8', '9': + start := cursor + cursor++ + for numTable[buf[cursor]] { + 
cursor++ + } + num := buf[start:cursor] + return num, cursor, nil + case 'n': + if err := validateNull(buf, cursor); err != nil { + return nil, 0, err + } + cursor += 4 + return nil, cursor, nil + default: + return nil, 0, d.typeError([]byte{buf[cursor]}, cursor) + } + } +} + +func (d *uintDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + bytes, err := d.decodeStreamByte(s) + if err != nil { + return err + } + if bytes == nil { + return nil + } + u64, err := d.parseUint(bytes) + if err != nil { + return d.typeError(bytes, s.totalOffset()) + } + switch d.kind { + case reflect.Uint8: + if (1 << 8) <= u64 { + return d.typeError(bytes, s.totalOffset()) + } + case reflect.Uint16: + if (1 << 16) <= u64 { + return d.typeError(bytes, s.totalOffset()) + } + case reflect.Uint32: + if (1 << 32) <= u64 { + return d.typeError(bytes, s.totalOffset()) + } + } + d.op(p, u64) + return nil +} + +func (d *uintDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + bytes, c, err := d.decodeByte(ctx.Buf, cursor) + if err != nil { + return 0, err + } + if bytes == nil { + return c, nil + } + cursor = c + u64, err := d.parseUint(bytes) + if err != nil { + return 0, d.typeError(bytes, cursor) + } + switch d.kind { + case reflect.Uint8: + if (1 << 8) <= u64 { + return 0, d.typeError(bytes, cursor) + } + case reflect.Uint16: + if (1 << 16) <= u64 { + return 0, d.typeError(bytes, cursor) + } + case reflect.Uint32: + if (1 << 32) <= u64 { + return 0, d.typeError(bytes, cursor) + } + } + d.op(p, u64) + return cursor, nil +} + +func (d *uintDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + return nil, 0, fmt.Errorf("json: uint decoder does not support decode path") +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/unmarshal_json.go b/vendor/github.com/goccy/go-json/internal/decoder/unmarshal_json.go new file mode 100644 index 0000000000000..4cd6dbd573ffc --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/unmarshal_json.go @@ -0,0 +1,104 @@ +package decoder + +import ( + "context" + "encoding/json" + "fmt" + "unsafe" + + "github.com/goccy/go-json/internal/errors" + "github.com/goccy/go-json/internal/runtime" +) + +type unmarshalJSONDecoder struct { + typ *runtime.Type + structName string + fieldName string +} + +func newUnmarshalJSONDecoder(typ *runtime.Type, structName, fieldName string) *unmarshalJSONDecoder { + return &unmarshalJSONDecoder{ + typ: typ, + structName: structName, + fieldName: fieldName, + } +} + +func (d *unmarshalJSONDecoder) annotateError(cursor int64, err error) { + switch e := err.(type) { + case *errors.UnmarshalTypeError: + e.Struct = d.structName + e.Field = d.fieldName + case *errors.SyntaxError: + e.Offset = cursor + } +} + +func (d *unmarshalJSONDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + s.skipWhiteSpace() + start := s.cursor + if err := s.skipValue(depth); err != nil { + return err + } + src := s.buf[start:s.cursor] + dst := make([]byte, len(src)) + copy(dst, src) + + v := *(*interface{})(unsafe.Pointer(&emptyInterface{ + typ: d.typ, + ptr: p, + })) + switch v := v.(type) { + case unmarshalerContext: + var ctx context.Context + if (s.Option.Flags & ContextOption) != 0 { + ctx = s.Option.Context + } else { + ctx = context.Background() + } + if err := v.UnmarshalJSON(ctx, dst); err != nil { + d.annotateError(s.cursor, err) + return err + } + case json.Unmarshaler: + if err := v.UnmarshalJSON(dst); err != nil { + 
d.annotateError(s.cursor, err) + return err + } + } + return nil +} + +func (d *unmarshalJSONDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + buf := ctx.Buf + cursor = skipWhiteSpace(buf, cursor) + start := cursor + end, err := skipValue(buf, cursor, depth) + if err != nil { + return 0, err + } + src := buf[start:end] + dst := make([]byte, len(src)) + copy(dst, src) + + v := *(*interface{})(unsafe.Pointer(&emptyInterface{ + typ: d.typ, + ptr: p, + })) + if (ctx.Option.Flags & ContextOption) != 0 { + if err := v.(unmarshalerContext).UnmarshalJSON(ctx.Option.Context, dst); err != nil { + d.annotateError(cursor, err) + return 0, err + } + } else { + if err := v.(json.Unmarshaler).UnmarshalJSON(dst); err != nil { + d.annotateError(cursor, err) + return 0, err + } + } + return end, nil +} + +func (d *unmarshalJSONDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + return nil, 0, fmt.Errorf("json: unmarshal json decoder does not support decode path") +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/unmarshal_text.go b/vendor/github.com/goccy/go-json/internal/decoder/unmarshal_text.go new file mode 100644 index 0000000000000..d711d0f85f065 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/unmarshal_text.go @@ -0,0 +1,285 @@ +package decoder + +import ( + "bytes" + "encoding" + "fmt" + "unicode" + "unicode/utf16" + "unicode/utf8" + "unsafe" + + "github.com/goccy/go-json/internal/errors" + "github.com/goccy/go-json/internal/runtime" +) + +type unmarshalTextDecoder struct { + typ *runtime.Type + structName string + fieldName string +} + +func newUnmarshalTextDecoder(typ *runtime.Type, structName, fieldName string) *unmarshalTextDecoder { + return &unmarshalTextDecoder{ + typ: typ, + structName: structName, + fieldName: fieldName, + } +} + +func (d *unmarshalTextDecoder) annotateError(cursor int64, err error) { + switch e := err.(type) { + case *errors.UnmarshalTypeError: + e.Struct = d.structName + e.Field = d.fieldName + case *errors.SyntaxError: + e.Offset = cursor + } +} + +var ( + nullbytes = []byte(`null`) +) + +func (d *unmarshalTextDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + s.skipWhiteSpace() + start := s.cursor + if err := s.skipValue(depth); err != nil { + return err + } + src := s.buf[start:s.cursor] + if len(src) > 0 { + switch src[0] { + case '[': + return &errors.UnmarshalTypeError{ + Value: "array", + Type: runtime.RType2Type(d.typ), + Offset: s.totalOffset(), + } + case '{': + return &errors.UnmarshalTypeError{ + Value: "object", + Type: runtime.RType2Type(d.typ), + Offset: s.totalOffset(), + } + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return &errors.UnmarshalTypeError{ + Value: "number", + Type: runtime.RType2Type(d.typ), + Offset: s.totalOffset(), + } + case 'n': + if bytes.Equal(src, nullbytes) { + *(*unsafe.Pointer)(p) = nil + return nil + } + } + } + dst := make([]byte, len(src)) + copy(dst, src) + + if b, ok := unquoteBytes(dst); ok { + dst = b + } + v := *(*interface{})(unsafe.Pointer(&emptyInterface{ + typ: d.typ, + ptr: p, + })) + if err := v.(encoding.TextUnmarshaler).UnmarshalText(dst); err != nil { + d.annotateError(s.cursor, err) + return err + } + return nil +} + +func (d *unmarshalTextDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + buf := ctx.Buf + cursor = skipWhiteSpace(buf, cursor) + start := cursor + end, err := skipValue(buf, cursor, depth) + if err 
!= nil { + return 0, err + } + src := buf[start:end] + if len(src) > 0 { + switch src[0] { + case '[': + return 0, &errors.UnmarshalTypeError{ + Value: "array", + Type: runtime.RType2Type(d.typ), + Offset: start, + } + case '{': + return 0, &errors.UnmarshalTypeError{ + Value: "object", + Type: runtime.RType2Type(d.typ), + Offset: start, + } + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return 0, &errors.UnmarshalTypeError{ + Value: "number", + Type: runtime.RType2Type(d.typ), + Offset: start, + } + case 'n': + if bytes.Equal(src, nullbytes) { + *(*unsafe.Pointer)(p) = nil + return end, nil + } + } + } + + if s, ok := unquoteBytes(src); ok { + src = s + } + v := *(*interface{})(unsafe.Pointer(&emptyInterface{ + typ: d.typ, + ptr: *(*unsafe.Pointer)(unsafe.Pointer(&p)), + })) + if err := v.(encoding.TextUnmarshaler).UnmarshalText(src); err != nil { + d.annotateError(cursor, err) + return 0, err + } + return end, nil +} + +func (d *unmarshalTextDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + return nil, 0, fmt.Errorf("json: unmarshal text decoder does not support decode path") +} + +func unquoteBytes(s []byte) (t []byte, ok bool) { //nolint: nonamedreturns + length := len(s) + if length < 2 || s[0] != '"' || s[length-1] != '"' { + return + } + s = s[1 : length-1] + length -= 2 + + // Check for unusual characters. If there are none, + // then no unquoting is needed, so return a slice of the + // original bytes. + r := 0 + for r < length { + c := s[r] + if c == '\\' || c == '"' || c < ' ' { + break + } + if c < utf8.RuneSelf { + r++ + continue + } + rr, size := utf8.DecodeRune(s[r:]) + if rr == utf8.RuneError && size == 1 { + break + } + r += size + } + if r == length { + return s, true + } + + b := make([]byte, length+2*utf8.UTFMax) + w := copy(b, s[0:r]) + for r < length { + // Out of room? Can only happen if s is full of + // malformed UTF-8 and we're replacing each + // byte with RuneError. + if w >= len(b)-2*utf8.UTFMax { + nb := make([]byte, (len(b)+utf8.UTFMax)*2) + copy(nb, b[0:w]) + b = nb + } + switch c := s[r]; { + case c == '\\': + r++ + if r >= length { + return + } + switch s[r] { + default: + return + case '"', '\\', '/', '\'': + b[w] = s[r] + r++ + w++ + case 'b': + b[w] = '\b' + r++ + w++ + case 'f': + b[w] = '\f' + r++ + w++ + case 'n': + b[w] = '\n' + r++ + w++ + case 'r': + b[w] = '\r' + r++ + w++ + case 't': + b[w] = '\t' + r++ + w++ + case 'u': + r-- + rr := getu4(s[r:]) + if rr < 0 { + return + } + r += 6 + if utf16.IsSurrogate(rr) { + rr1 := getu4(s[r:]) + if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { + // A valid pair; consume. + r += 6 + w += utf8.EncodeRune(b[w:], dec) + break + } + // Invalid surrogate; fall back to replacement rune. + rr = unicode.ReplacementChar + } + w += utf8.EncodeRune(b[w:], rr) + } + + // Quote, control characters are invalid. + case c == '"', c < ' ': + return + + // ASCII + case c < utf8.RuneSelf: + b[w] = c + r++ + w++ + + // Coerce to well-formed UTF-8. 
+ default: + rr, size := utf8.DecodeRune(s[r:]) + r += size + w += utf8.EncodeRune(b[w:], rr) + } + } + return b[0:w], true +} + +func getu4(s []byte) rune { + if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { + return -1 + } + var r rune + for _, c := range s[2:6] { + switch { + case '0' <= c && c <= '9': + c = c - '0' + case 'a' <= c && c <= 'f': + c = c - 'a' + 10 + case 'A' <= c && c <= 'F': + c = c - 'A' + 10 + default: + return -1 + } + r = r*16 + rune(c) + } + return r +} diff --git a/vendor/github.com/goccy/go-json/internal/decoder/wrapped_string.go b/vendor/github.com/goccy/go-json/internal/decoder/wrapped_string.go new file mode 100644 index 0000000000000..0c4e2e6eacfcf --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/decoder/wrapped_string.go @@ -0,0 +1,73 @@ +package decoder + +import ( + "fmt" + "reflect" + "unsafe" + + "github.com/goccy/go-json/internal/runtime" +) + +type wrappedStringDecoder struct { + typ *runtime.Type + dec Decoder + stringDecoder *stringDecoder + structName string + fieldName string + isPtrType bool +} + +func newWrappedStringDecoder(typ *runtime.Type, dec Decoder, structName, fieldName string) *wrappedStringDecoder { + return &wrappedStringDecoder{ + typ: typ, + dec: dec, + stringDecoder: newStringDecoder(structName, fieldName), + structName: structName, + fieldName: fieldName, + isPtrType: typ.Kind() == reflect.Ptr, + } +} + +func (d *wrappedStringDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { + bytes, err := d.stringDecoder.decodeStreamByte(s) + if err != nil { + return err + } + if bytes == nil { + if d.isPtrType { + *(*unsafe.Pointer)(p) = nil + } + return nil + } + b := make([]byte, len(bytes)+1) + copy(b, bytes) + if _, err := d.dec.Decode(&RuntimeContext{Buf: b}, 0, depth, p); err != nil { + return err + } + return nil +} + +func (d *wrappedStringDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { + bytes, c, err := d.stringDecoder.decodeByte(ctx.Buf, cursor) + if err != nil { + return 0, err + } + if bytes == nil { + if d.isPtrType { + *(*unsafe.Pointer)(p) = nil + } + return c, nil + } + bytes = append(bytes, nul) + oldBuf := ctx.Buf + ctx.Buf = bytes + if _, err := d.dec.Decode(ctx, 0, depth, p); err != nil { + return 0, err + } + ctx.Buf = oldBuf + return c, nil +} + +func (d *wrappedStringDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { + return nil, 0, fmt.Errorf("json: wrapped string decoder does not support decode path") +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/code.go b/vendor/github.com/goccy/go-json/internal/encoder/code.go new file mode 100644 index 0000000000000..5b08faefc738e --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/code.go @@ -0,0 +1,1023 @@ +package encoder + +import ( + "fmt" + "reflect" + "unsafe" + + "github.com/goccy/go-json/internal/runtime" +) + +type Code interface { + Kind() CodeKind + ToOpcode(*compileContext) Opcodes + Filter(*FieldQuery) Code +} + +type AnonymousCode interface { + ToAnonymousOpcode(*compileContext) Opcodes +} + +type Opcodes []*Opcode + +func (o Opcodes) First() *Opcode { + if len(o) == 0 { + return nil + } + return o[0] +} + +func (o Opcodes) Last() *Opcode { + if len(o) == 0 { + return nil + } + return o[len(o)-1] +} + +func (o Opcodes) Add(codes ...*Opcode) Opcodes { + return append(o, codes...) 
+} + +type CodeKind int + +const ( + CodeKindInterface CodeKind = iota + CodeKindPtr + CodeKindInt + CodeKindUint + CodeKindFloat + CodeKindString + CodeKindBool + CodeKindStruct + CodeKindMap + CodeKindSlice + CodeKindArray + CodeKindBytes + CodeKindMarshalJSON + CodeKindMarshalText + CodeKindRecursive +) + +type IntCode struct { + typ *runtime.Type + bitSize uint8 + isString bool + isPtr bool +} + +func (c *IntCode) Kind() CodeKind { + return CodeKindInt +} + +func (c *IntCode) ToOpcode(ctx *compileContext) Opcodes { + var code *Opcode + switch { + case c.isPtr: + code = newOpCode(ctx, c.typ, OpIntPtr) + case c.isString: + code = newOpCode(ctx, c.typ, OpIntString) + default: + code = newOpCode(ctx, c.typ, OpInt) + } + code.NumBitSize = c.bitSize + ctx.incIndex() + return Opcodes{code} +} + +func (c *IntCode) Filter(_ *FieldQuery) Code { + return c +} + +type UintCode struct { + typ *runtime.Type + bitSize uint8 + isString bool + isPtr bool +} + +func (c *UintCode) Kind() CodeKind { + return CodeKindUint +} + +func (c *UintCode) ToOpcode(ctx *compileContext) Opcodes { + var code *Opcode + switch { + case c.isPtr: + code = newOpCode(ctx, c.typ, OpUintPtr) + case c.isString: + code = newOpCode(ctx, c.typ, OpUintString) + default: + code = newOpCode(ctx, c.typ, OpUint) + } + code.NumBitSize = c.bitSize + ctx.incIndex() + return Opcodes{code} +} + +func (c *UintCode) Filter(_ *FieldQuery) Code { + return c +} + +type FloatCode struct { + typ *runtime.Type + bitSize uint8 + isPtr bool +} + +func (c *FloatCode) Kind() CodeKind { + return CodeKindFloat +} + +func (c *FloatCode) ToOpcode(ctx *compileContext) Opcodes { + var code *Opcode + switch { + case c.isPtr: + switch c.bitSize { + case 32: + code = newOpCode(ctx, c.typ, OpFloat32Ptr) + default: + code = newOpCode(ctx, c.typ, OpFloat64Ptr) + } + default: + switch c.bitSize { + case 32: + code = newOpCode(ctx, c.typ, OpFloat32) + default: + code = newOpCode(ctx, c.typ, OpFloat64) + } + } + ctx.incIndex() + return Opcodes{code} +} + +func (c *FloatCode) Filter(_ *FieldQuery) Code { + return c +} + +type StringCode struct { + typ *runtime.Type + isPtr bool +} + +func (c *StringCode) Kind() CodeKind { + return CodeKindString +} + +func (c *StringCode) ToOpcode(ctx *compileContext) Opcodes { + isJSONNumberType := c.typ == runtime.Type2RType(jsonNumberType) + var code *Opcode + if c.isPtr { + if isJSONNumberType { + code = newOpCode(ctx, c.typ, OpNumberPtr) + } else { + code = newOpCode(ctx, c.typ, OpStringPtr) + } + } else { + if isJSONNumberType { + code = newOpCode(ctx, c.typ, OpNumber) + } else { + code = newOpCode(ctx, c.typ, OpString) + } + } + ctx.incIndex() + return Opcodes{code} +} + +func (c *StringCode) Filter(_ *FieldQuery) Code { + return c +} + +type BoolCode struct { + typ *runtime.Type + isPtr bool +} + +func (c *BoolCode) Kind() CodeKind { + return CodeKindBool +} + +func (c *BoolCode) ToOpcode(ctx *compileContext) Opcodes { + var code *Opcode + switch { + case c.isPtr: + code = newOpCode(ctx, c.typ, OpBoolPtr) + default: + code = newOpCode(ctx, c.typ, OpBool) + } + ctx.incIndex() + return Opcodes{code} +} + +func (c *BoolCode) Filter(_ *FieldQuery) Code { + return c +} + +type BytesCode struct { + typ *runtime.Type + isPtr bool +} + +func (c *BytesCode) Kind() CodeKind { + return CodeKindBytes +} + +func (c *BytesCode) ToOpcode(ctx *compileContext) Opcodes { + var code *Opcode + switch { + case c.isPtr: + code = newOpCode(ctx, c.typ, OpBytesPtr) + default: + code = newOpCode(ctx, c.typ, OpBytes) + } + ctx.incIndex() + return 
Opcodes{code} +} + +func (c *BytesCode) Filter(_ *FieldQuery) Code { + return c +} + +type SliceCode struct { + typ *runtime.Type + value Code +} + +func (c *SliceCode) Kind() CodeKind { + return CodeKindSlice +} + +func (c *SliceCode) ToOpcode(ctx *compileContext) Opcodes { + // header => opcode => elem => end + // ^ | + // |________| + size := c.typ.Elem().Size() + header := newSliceHeaderCode(ctx, c.typ) + ctx.incIndex() + + ctx.incIndent() + codes := c.value.ToOpcode(ctx) + ctx.decIndent() + + codes.First().Flags |= IndirectFlags + elemCode := newSliceElemCode(ctx, c.typ.Elem(), header, size) + ctx.incIndex() + end := newOpCode(ctx, c.typ, OpSliceEnd) + ctx.incIndex() + header.End = end + header.Next = codes.First() + codes.Last().Next = elemCode + elemCode.Next = codes.First() + elemCode.End = end + return Opcodes{header}.Add(codes...).Add(elemCode).Add(end) +} + +func (c *SliceCode) Filter(_ *FieldQuery) Code { + return c +} + +type ArrayCode struct { + typ *runtime.Type + value Code +} + +func (c *ArrayCode) Kind() CodeKind { + return CodeKindArray +} + +func (c *ArrayCode) ToOpcode(ctx *compileContext) Opcodes { + // header => opcode => elem => end + // ^ | + // |________| + elem := c.typ.Elem() + alen := c.typ.Len() + size := elem.Size() + + header := newArrayHeaderCode(ctx, c.typ, alen) + ctx.incIndex() + + ctx.incIndent() + codes := c.value.ToOpcode(ctx) + ctx.decIndent() + + codes.First().Flags |= IndirectFlags + + elemCode := newArrayElemCode(ctx, elem, header, alen, size) + ctx.incIndex() + + end := newOpCode(ctx, c.typ, OpArrayEnd) + ctx.incIndex() + + header.End = end + header.Next = codes.First() + codes.Last().Next = elemCode + elemCode.Next = codes.First() + elemCode.End = end + + return Opcodes{header}.Add(codes...).Add(elemCode).Add(end) +} + +func (c *ArrayCode) Filter(_ *FieldQuery) Code { + return c +} + +type MapCode struct { + typ *runtime.Type + key Code + value Code +} + +func (c *MapCode) Kind() CodeKind { + return CodeKindMap +} + +func (c *MapCode) ToOpcode(ctx *compileContext) Opcodes { + // header => code => value => code => key => code => value => code => end + // ^ | + // |_______________________| + header := newMapHeaderCode(ctx, c.typ) + ctx.incIndex() + + keyCodes := c.key.ToOpcode(ctx) + + value := newMapValueCode(ctx, c.typ.Elem(), header) + ctx.incIndex() + + ctx.incIndent() + valueCodes := c.value.ToOpcode(ctx) + ctx.decIndent() + + valueCodes.First().Flags |= IndirectFlags + + key := newMapKeyCode(ctx, c.typ.Key(), header) + ctx.incIndex() + + end := newMapEndCode(ctx, c.typ, header) + ctx.incIndex() + + header.Next = keyCodes.First() + keyCodes.Last().Next = value + value.Next = valueCodes.First() + valueCodes.Last().Next = key + key.Next = keyCodes.First() + + header.End = end + key.End = end + value.End = end + return Opcodes{header}.Add(keyCodes...).Add(value).Add(valueCodes...).Add(key).Add(end) +} + +func (c *MapCode) Filter(_ *FieldQuery) Code { + return c +} + +type StructCode struct { + typ *runtime.Type + fields []*StructFieldCode + isPtr bool + disableIndirectConversion bool + isIndirect bool + isRecursive bool +} + +func (c *StructCode) Kind() CodeKind { + return CodeKindStruct +} + +func (c *StructCode) lastFieldCode(field *StructFieldCode, firstField *Opcode) *Opcode { + if isEmbeddedStruct(field) { + return c.lastAnonymousFieldCode(firstField) + } + lastField := firstField + for lastField.NextField != nil { + lastField = lastField.NextField + } + return lastField +} + +func (c *StructCode) lastAnonymousFieldCode(firstField *Opcode) 
*Opcode { + // firstField is special StructHead operation for anonymous structure. + // So, StructHead's next operation is truly struct head operation. + for firstField.Op == OpStructHead || firstField.Op == OpStructField { + firstField = firstField.Next + } + lastField := firstField + for lastField.NextField != nil { + lastField = lastField.NextField + } + return lastField +} + +func (c *StructCode) ToOpcode(ctx *compileContext) Opcodes { + // header => code => structField => code => end + // ^ | + // |__________| + if c.isRecursive { + recursive := newRecursiveCode(ctx, c.typ, &CompiledCode{}) + recursive.Type = c.typ + ctx.incIndex() + *ctx.recursiveCodes = append(*ctx.recursiveCodes, recursive) + return Opcodes{recursive} + } + codes := Opcodes{} + var prevField *Opcode + ctx.incIndent() + for idx, field := range c.fields { + isFirstField := idx == 0 + isEndField := idx == len(c.fields)-1 + fieldCodes := field.ToOpcode(ctx, isFirstField, isEndField) + for _, code := range fieldCodes { + if c.isIndirect { + code.Flags |= IndirectFlags + } + } + firstField := fieldCodes.First() + if len(codes) > 0 { + codes.Last().Next = firstField + firstField.Idx = codes.First().Idx + } + if prevField != nil { + prevField.NextField = firstField + } + if isEndField { + endField := fieldCodes.Last() + if len(codes) > 0 { + codes.First().End = endField + } else { + firstField.End = endField + } + codes = codes.Add(fieldCodes...) + break + } + prevField = c.lastFieldCode(field, firstField) + codes = codes.Add(fieldCodes...) + } + if len(codes) == 0 { + head := &Opcode{ + Op: OpStructHead, + Idx: opcodeOffset(ctx.ptrIndex), + Type: c.typ, + DisplayIdx: ctx.opcodeIndex, + Indent: ctx.indent, + } + ctx.incOpcodeIndex() + end := &Opcode{ + Op: OpStructEnd, + Idx: opcodeOffset(ctx.ptrIndex), + DisplayIdx: ctx.opcodeIndex, + Indent: ctx.indent, + } + head.NextField = end + head.Next = end + head.End = end + codes = codes.Add(head, end) + ctx.incIndex() + } + ctx.decIndent() + ctx.structTypeToCodes[uintptr(unsafe.Pointer(c.typ))] = codes + return codes +} + +func (c *StructCode) ToAnonymousOpcode(ctx *compileContext) Opcodes { + // header => code => structField => code => end + // ^ | + // |__________| + if c.isRecursive { + recursive := newRecursiveCode(ctx, c.typ, &CompiledCode{}) + recursive.Type = c.typ + ctx.incIndex() + *ctx.recursiveCodes = append(*ctx.recursiveCodes, recursive) + return Opcodes{recursive} + } + codes := Opcodes{} + var prevField *Opcode + for idx, field := range c.fields { + isFirstField := idx == 0 + isEndField := idx == len(c.fields)-1 + fieldCodes := field.ToAnonymousOpcode(ctx, isFirstField, isEndField) + for _, code := range fieldCodes { + if c.isIndirect { + code.Flags |= IndirectFlags + } + } + firstField := fieldCodes.First() + if len(codes) > 0 { + codes.Last().Next = firstField + firstField.Idx = codes.First().Idx + } + if prevField != nil { + prevField.NextField = firstField + } + if isEndField { + lastField := fieldCodes.Last() + if len(codes) > 0 { + codes.First().End = lastField + } else { + firstField.End = lastField + } + } + prevField = firstField + codes = codes.Add(fieldCodes...) 
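+		// note: unlike ToOpcode, this anonymous variant never appends an OpStructEnd
+		// and does not register its result in structTypeToCodes; the struct that
+		// embeds this one is responsible for closing the JSON object.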
+ } + return codes +} + +func (c *StructCode) removeFieldsByTags(tags runtime.StructTags) { + fields := make([]*StructFieldCode, 0, len(c.fields)) + for _, field := range c.fields { + if field.isAnonymous { + structCode := field.getAnonymousStruct() + if structCode != nil && !structCode.isRecursive { + structCode.removeFieldsByTags(tags) + if len(structCode.fields) > 0 { + fields = append(fields, field) + } + continue + } + } + if tags.ExistsKey(field.key) { + continue + } + fields = append(fields, field) + } + c.fields = fields +} + +func (c *StructCode) enableIndirect() { + if c.isIndirect { + return + } + c.isIndirect = true + if len(c.fields) == 0 { + return + } + structCode := c.fields[0].getStruct() + if structCode == nil { + return + } + structCode.enableIndirect() +} + +func (c *StructCode) Filter(query *FieldQuery) Code { + fieldMap := map[string]*FieldQuery{} + for _, field := range query.Fields { + fieldMap[field.Name] = field + } + fields := make([]*StructFieldCode, 0, len(c.fields)) + for _, field := range c.fields { + query, exists := fieldMap[field.key] + if !exists { + continue + } + fieldCode := &StructFieldCode{ + typ: field.typ, + key: field.key, + tag: field.tag, + value: field.value, + offset: field.offset, + isAnonymous: field.isAnonymous, + isTaggedKey: field.isTaggedKey, + isNilableType: field.isNilableType, + isNilCheck: field.isNilCheck, + isAddrForMarshaler: field.isAddrForMarshaler, + isNextOpPtrType: field.isNextOpPtrType, + } + if len(query.Fields) > 0 { + fieldCode.value = fieldCode.value.Filter(query) + } + fields = append(fields, fieldCode) + } + return &StructCode{ + typ: c.typ, + fields: fields, + isPtr: c.isPtr, + disableIndirectConversion: c.disableIndirectConversion, + isIndirect: c.isIndirect, + isRecursive: c.isRecursive, + } +} + +type StructFieldCode struct { + typ *runtime.Type + key string + tag *runtime.StructTag + value Code + offset uintptr + isAnonymous bool + isTaggedKey bool + isNilableType bool + isNilCheck bool + isAddrForMarshaler bool + isNextOpPtrType bool + isMarshalerContext bool +} + +func (c *StructFieldCode) getStruct() *StructCode { + value := c.value + ptr, ok := value.(*PtrCode) + if ok { + value = ptr.value + } + structCode, ok := value.(*StructCode) + if ok { + return structCode + } + return nil +} + +func (c *StructFieldCode) getAnonymousStruct() *StructCode { + if !c.isAnonymous { + return nil + } + return c.getStruct() +} + +func optimizeStructHeader(code *Opcode, tag *runtime.StructTag) OpType { + headType := code.ToHeaderType(tag.IsString) + if tag.IsOmitEmpty { + headType = headType.HeadToOmitEmptyHead() + } + return headType +} + +func optimizeStructField(code *Opcode, tag *runtime.StructTag) OpType { + fieldType := code.ToFieldType(tag.IsString) + if tag.IsOmitEmpty { + fieldType = fieldType.FieldToOmitEmptyField() + } + return fieldType +} + +func (c *StructFieldCode) headerOpcodes(ctx *compileContext, field *Opcode, valueCodes Opcodes) Opcodes { + value := valueCodes.First() + op := optimizeStructHeader(value, c.tag) + field.Op = op + if value.Flags&MarshalerContextFlags != 0 { + field.Flags |= MarshalerContextFlags + } + field.NumBitSize = value.NumBitSize + field.PtrNum = value.PtrNum + field.FieldQuery = value.FieldQuery + fieldCodes := Opcodes{field} + if op.IsMultipleOpHead() { + field.Next = value + fieldCodes = fieldCodes.Add(valueCodes...) 
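+		// heads that still need the value's own opcodes (IsMultipleOpHead) keep the
+		// value chain linked after the header; simple heads encode the value inline,
+		// so the else branch below releases the index reserved for the value opcode.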
+ } else { + ctx.decIndex() + } + return fieldCodes +} + +func (c *StructFieldCode) fieldOpcodes(ctx *compileContext, field *Opcode, valueCodes Opcodes) Opcodes { + value := valueCodes.First() + op := optimizeStructField(value, c.tag) + field.Op = op + if value.Flags&MarshalerContextFlags != 0 { + field.Flags |= MarshalerContextFlags + } + field.NumBitSize = value.NumBitSize + field.PtrNum = value.PtrNum + field.FieldQuery = value.FieldQuery + + fieldCodes := Opcodes{field} + if op.IsMultipleOpField() { + field.Next = value + fieldCodes = fieldCodes.Add(valueCodes...) + } else { + ctx.decIndex() + } + return fieldCodes +} + +func (c *StructFieldCode) addStructEndCode(ctx *compileContext, codes Opcodes) Opcodes { + end := &Opcode{ + Op: OpStructEnd, + Idx: opcodeOffset(ctx.ptrIndex), + DisplayIdx: ctx.opcodeIndex, + Indent: ctx.indent, + } + codes.Last().Next = end + code := codes.First() + for code.Op == OpStructField || code.Op == OpStructHead { + code = code.Next + } + for code.NextField != nil { + code = code.NextField + } + code.NextField = end + + codes = codes.Add(end) + ctx.incOpcodeIndex() + return codes +} + +func (c *StructFieldCode) structKey(ctx *compileContext) string { + if ctx.escapeKey { + rctx := &RuntimeContext{Option: &Option{Flag: HTMLEscapeOption}} + return fmt.Sprintf(`%s:`, string(AppendString(rctx, []byte{}, c.key))) + } + return fmt.Sprintf(`"%s":`, c.key) +} + +func (c *StructFieldCode) flags() OpFlags { + var flags OpFlags + if c.isTaggedKey { + flags |= IsTaggedKeyFlags + } + if c.isNilableType { + flags |= IsNilableTypeFlags + } + if c.isNilCheck { + flags |= NilCheckFlags + } + if c.isAddrForMarshaler { + flags |= AddrForMarshalerFlags + } + if c.isNextOpPtrType { + flags |= IsNextOpPtrTypeFlags + } + if c.isAnonymous { + flags |= AnonymousKeyFlags + } + if c.isMarshalerContext { + flags |= MarshalerContextFlags + } + return flags +} + +func (c *StructFieldCode) toValueOpcodes(ctx *compileContext) Opcodes { + if c.isAnonymous { + anonymCode, ok := c.value.(AnonymousCode) + if ok { + return anonymCode.ToAnonymousOpcode(ctx) + } + } + return c.value.ToOpcode(ctx) +} + +func (c *StructFieldCode) ToOpcode(ctx *compileContext, isFirstField, isEndField bool) Opcodes { + field := &Opcode{ + Idx: opcodeOffset(ctx.ptrIndex), + Flags: c.flags(), + Key: c.structKey(ctx), + Offset: uint32(c.offset), + Type: c.typ, + DisplayIdx: ctx.opcodeIndex, + Indent: ctx.indent, + DisplayKey: c.key, + } + ctx.incIndex() + valueCodes := c.toValueOpcodes(ctx) + if isFirstField { + codes := c.headerOpcodes(ctx, field, valueCodes) + if isEndField { + codes = c.addStructEndCode(ctx, codes) + } + return codes + } + codes := c.fieldOpcodes(ctx, field, valueCodes) + if isEndField { + if isEnableStructEndOptimization(c.value) { + field.Op = field.Op.FieldToEnd() + } else { + codes = c.addStructEndCode(ctx, codes) + } + } + return codes +} + +func (c *StructFieldCode) ToAnonymousOpcode(ctx *compileContext, isFirstField, isEndField bool) Opcodes { + field := &Opcode{ + Idx: opcodeOffset(ctx.ptrIndex), + Flags: c.flags() | AnonymousHeadFlags, + Key: c.structKey(ctx), + Offset: uint32(c.offset), + Type: c.typ, + DisplayIdx: ctx.opcodeIndex, + Indent: ctx.indent, + DisplayKey: c.key, + } + ctx.incIndex() + valueCodes := c.toValueOpcodes(ctx) + if isFirstField { + return c.headerOpcodes(ctx, field, valueCodes) + } + return c.fieldOpcodes(ctx, field, valueCodes) +} + +func isEnableStructEndOptimization(value Code) bool { + switch value.Kind() { + case CodeKindInt, + CodeKindUint, + CodeKindFloat, + 
CodeKindString, + CodeKindBool, + CodeKindBytes: + return true + case CodeKindPtr: + return isEnableStructEndOptimization(value.(*PtrCode).value) + default: + return false + } +} + +type InterfaceCode struct { + typ *runtime.Type + fieldQuery *FieldQuery + isPtr bool +} + +func (c *InterfaceCode) Kind() CodeKind { + return CodeKindInterface +} + +func (c *InterfaceCode) ToOpcode(ctx *compileContext) Opcodes { + var code *Opcode + switch { + case c.isPtr: + code = newOpCode(ctx, c.typ, OpInterfacePtr) + default: + code = newOpCode(ctx, c.typ, OpInterface) + } + code.FieldQuery = c.fieldQuery + if c.typ.NumMethod() > 0 { + code.Flags |= NonEmptyInterfaceFlags + } + ctx.incIndex() + return Opcodes{code} +} + +func (c *InterfaceCode) Filter(query *FieldQuery) Code { + return &InterfaceCode{ + typ: c.typ, + fieldQuery: query, + isPtr: c.isPtr, + } +} + +type MarshalJSONCode struct { + typ *runtime.Type + fieldQuery *FieldQuery + isAddrForMarshaler bool + isNilableType bool + isMarshalerContext bool +} + +func (c *MarshalJSONCode) Kind() CodeKind { + return CodeKindMarshalJSON +} + +func (c *MarshalJSONCode) ToOpcode(ctx *compileContext) Opcodes { + code := newOpCode(ctx, c.typ, OpMarshalJSON) + code.FieldQuery = c.fieldQuery + if c.isAddrForMarshaler { + code.Flags |= AddrForMarshalerFlags + } + if c.isMarshalerContext { + code.Flags |= MarshalerContextFlags + } + if c.isNilableType { + code.Flags |= IsNilableTypeFlags + } else { + code.Flags &= ^IsNilableTypeFlags + } + ctx.incIndex() + return Opcodes{code} +} + +func (c *MarshalJSONCode) Filter(query *FieldQuery) Code { + return &MarshalJSONCode{ + typ: c.typ, + fieldQuery: query, + isAddrForMarshaler: c.isAddrForMarshaler, + isNilableType: c.isNilableType, + isMarshalerContext: c.isMarshalerContext, + } +} + +type MarshalTextCode struct { + typ *runtime.Type + fieldQuery *FieldQuery + isAddrForMarshaler bool + isNilableType bool +} + +func (c *MarshalTextCode) Kind() CodeKind { + return CodeKindMarshalText +} + +func (c *MarshalTextCode) ToOpcode(ctx *compileContext) Opcodes { + code := newOpCode(ctx, c.typ, OpMarshalText) + code.FieldQuery = c.fieldQuery + if c.isAddrForMarshaler { + code.Flags |= AddrForMarshalerFlags + } + if c.isNilableType { + code.Flags |= IsNilableTypeFlags + } else { + code.Flags &= ^IsNilableTypeFlags + } + ctx.incIndex() + return Opcodes{code} +} + +func (c *MarshalTextCode) Filter(query *FieldQuery) Code { + return &MarshalTextCode{ + typ: c.typ, + fieldQuery: query, + isAddrForMarshaler: c.isAddrForMarshaler, + isNilableType: c.isNilableType, + } +} + +type PtrCode struct { + typ *runtime.Type + value Code + ptrNum uint8 +} + +func (c *PtrCode) Kind() CodeKind { + return CodeKindPtr +} + +func (c *PtrCode) ToOpcode(ctx *compileContext) Opcodes { + codes := c.value.ToOpcode(ctx) + codes.First().Op = convertPtrOp(codes.First()) + codes.First().PtrNum = c.ptrNum + return codes +} + +func (c *PtrCode) ToAnonymousOpcode(ctx *compileContext) Opcodes { + var codes Opcodes + anonymCode, ok := c.value.(AnonymousCode) + if ok { + codes = anonymCode.ToAnonymousOpcode(ctx) + } else { + codes = c.value.ToOpcode(ctx) + } + codes.First().Op = convertPtrOp(codes.First()) + codes.First().PtrNum = c.ptrNum + return codes +} + +func (c *PtrCode) Filter(query *FieldQuery) Code { + return &PtrCode{ + typ: c.typ, + value: c.value.Filter(query), + ptrNum: c.ptrNum, + } +} + +func convertPtrOp(code *Opcode) OpType { + ptrHeadOp := code.Op.HeadToPtrHead() + if code.Op != ptrHeadOp { + if code.PtrNum > 0 { + // ptr field and ptr head + 
code.PtrNum-- + } + return ptrHeadOp + } + switch code.Op { + case OpInt: + return OpIntPtr + case OpUint: + return OpUintPtr + case OpFloat32: + return OpFloat32Ptr + case OpFloat64: + return OpFloat64Ptr + case OpString: + return OpStringPtr + case OpBool: + return OpBoolPtr + case OpBytes: + return OpBytesPtr + case OpNumber: + return OpNumberPtr + case OpArray: + return OpArrayPtr + case OpSlice: + return OpSlicePtr + case OpMap: + return OpMapPtr + case OpMarshalJSON: + return OpMarshalJSONPtr + case OpMarshalText: + return OpMarshalTextPtr + case OpInterface: + return OpInterfacePtr + case OpRecursive: + return OpRecursivePtr + } + return code.Op +} + +func isEmbeddedStruct(field *StructFieldCode) bool { + if !field.isAnonymous { + return false + } + t := field.typ + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + return t.Kind() == reflect.Struct +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/compact.go b/vendor/github.com/goccy/go-json/internal/encoder/compact.go new file mode 100644 index 0000000000000..e287a6c03f43c --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/compact.go @@ -0,0 +1,286 @@ +package encoder + +import ( + "bytes" + "fmt" + "strconv" + "unsafe" + + "github.com/goccy/go-json/internal/errors" +) + +var ( + isWhiteSpace = [256]bool{ + ' ': true, + '\n': true, + '\t': true, + '\r': true, + } + isHTMLEscapeChar = [256]bool{ + '<': true, + '>': true, + '&': true, + } + nul = byte('\000') +) + +func Compact(buf *bytes.Buffer, src []byte, escape bool) error { + if len(src) == 0 { + return errors.ErrUnexpectedEndOfJSON("", 0) + } + buf.Grow(len(src)) + dst := buf.Bytes() + + ctx := TakeRuntimeContext() + ctxBuf := ctx.Buf[:0] + ctxBuf = append(append(ctxBuf, src...), nul) + ctx.Buf = ctxBuf + + if err := compactAndWrite(buf, dst, ctxBuf, escape); err != nil { + ReleaseRuntimeContext(ctx) + return err + } + ReleaseRuntimeContext(ctx) + return nil +} + +func compactAndWrite(buf *bytes.Buffer, dst []byte, src []byte, escape bool) error { + dst, err := compact(dst, src, escape) + if err != nil { + return err + } + if _, err := buf.Write(dst); err != nil { + return err + } + return nil +} + +func compact(dst, src []byte, escape bool) ([]byte, error) { + buf, cursor, err := compactValue(dst, src, 0, escape) + if err != nil { + return nil, err + } + if err := validateEndBuf(src, cursor); err != nil { + return nil, err + } + return buf, nil +} + +func validateEndBuf(src []byte, cursor int64) error { + for { + switch src[cursor] { + case ' ', '\t', '\n', '\r': + cursor++ + continue + case nul: + return nil + } + return errors.ErrSyntax( + fmt.Sprintf("invalid character '%c' after top-level value", src[cursor]), + cursor+1, + ) + } +} + +func skipWhiteSpace(buf []byte, cursor int64) int64 { +LOOP: + if isWhiteSpace[buf[cursor]] { + cursor++ + goto LOOP + } + return cursor +} + +func compactValue(dst, src []byte, cursor int64, escape bool) ([]byte, int64, error) { + for { + switch src[cursor] { + case ' ', '\t', '\n', '\r': + cursor++ + continue + case '{': + return compactObject(dst, src, cursor, escape) + case '}': + return nil, 0, errors.ErrSyntax("unexpected character '}'", cursor) + case '[': + return compactArray(dst, src, cursor, escape) + case ']': + return nil, 0, errors.ErrSyntax("unexpected character ']'", cursor) + case '"': + return compactString(dst, src, cursor, escape) + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return compactNumber(dst, src, cursor) + case 't': + return compactTrue(dst, src, cursor) + case 
'f': + return compactFalse(dst, src, cursor) + case 'n': + return compactNull(dst, src, cursor) + default: + return nil, 0, errors.ErrSyntax(fmt.Sprintf("unexpected character '%c'", src[cursor]), cursor) + } + } +} + +func compactObject(dst, src []byte, cursor int64, escape bool) ([]byte, int64, error) { + if src[cursor] == '{' { + dst = append(dst, '{') + } else { + return nil, 0, errors.ErrExpected("expected { character for object value", cursor) + } + cursor = skipWhiteSpace(src, cursor+1) + if src[cursor] == '}' { + dst = append(dst, '}') + return dst, cursor + 1, nil + } + var err error + for { + cursor = skipWhiteSpace(src, cursor) + dst, cursor, err = compactString(dst, src, cursor, escape) + if err != nil { + return nil, 0, err + } + cursor = skipWhiteSpace(src, cursor) + if src[cursor] != ':' { + return nil, 0, errors.ErrExpected("colon after object key", cursor) + } + dst = append(dst, ':') + dst, cursor, err = compactValue(dst, src, cursor+1, escape) + if err != nil { + return nil, 0, err + } + cursor = skipWhiteSpace(src, cursor) + switch src[cursor] { + case '}': + dst = append(dst, '}') + cursor++ + return dst, cursor, nil + case ',': + dst = append(dst, ',') + default: + return nil, 0, errors.ErrExpected("comma after object value", cursor) + } + cursor++ + } +} + +func compactArray(dst, src []byte, cursor int64, escape bool) ([]byte, int64, error) { + if src[cursor] == '[' { + dst = append(dst, '[') + } else { + return nil, 0, errors.ErrExpected("expected [ character for array value", cursor) + } + cursor = skipWhiteSpace(src, cursor+1) + if src[cursor] == ']' { + dst = append(dst, ']') + return dst, cursor + 1, nil + } + var err error + for { + dst, cursor, err = compactValue(dst, src, cursor, escape) + if err != nil { + return nil, 0, err + } + cursor = skipWhiteSpace(src, cursor) + switch src[cursor] { + case ']': + dst = append(dst, ']') + cursor++ + return dst, cursor, nil + case ',': + dst = append(dst, ',') + default: + return nil, 0, errors.ErrExpected("comma after array value", cursor) + } + cursor++ + } +} + +func compactString(dst, src []byte, cursor int64, escape bool) ([]byte, int64, error) { + if src[cursor] != '"' { + return nil, 0, errors.ErrInvalidCharacter(src[cursor], "string", cursor) + } + start := cursor + for { + cursor++ + c := src[cursor] + if escape { + if isHTMLEscapeChar[c] { + dst = append(dst, src[start:cursor]...) + dst = append(dst, `\u00`...) + dst = append(dst, hex[c>>4], hex[c&0xF]) + start = cursor + 1 + } else if c == 0xE2 && cursor+2 < int64(len(src)) && src[cursor+1] == 0x80 && src[cursor+2]&^1 == 0xA8 { + dst = append(dst, src[start:cursor]...) + dst = append(dst, `\u202`...) + dst = append(dst, hex[src[cursor+2]&0xF]) + start = cursor + 3 + cursor += 2 + } + } + switch c { + case '\\': + cursor++ + if src[cursor] == nul { + return nil, 0, errors.ErrUnexpectedEndOfJSON("string", int64(len(src))) + } + case '"': + cursor++ + return append(dst, src[start:cursor]...), cursor, nil + case nul: + return nil, 0, errors.ErrUnexpectedEndOfJSON("string", int64(len(src))) + } + } +} + +func compactNumber(dst, src []byte, cursor int64) ([]byte, int64, error) { + start := cursor + for { + cursor++ + if floatTable[src[cursor]] { + continue + } + break + } + num := src[start:cursor] + if _, err := strconv.ParseFloat(*(*string)(unsafe.Pointer(&num)), 64); err != nil { + return nil, 0, err + } + dst = append(dst, num...) 
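+	// the number bytes were copied to dst verbatim; ParseFloat above only validates
+	// the literal (its result is discarded), and the unsafe cast avoids allocating
+	// a string copy of the slice.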
+ return dst, cursor, nil +} + +func compactTrue(dst, src []byte, cursor int64) ([]byte, int64, error) { + if cursor+3 >= int64(len(src)) { + return nil, 0, errors.ErrUnexpectedEndOfJSON("true", cursor) + } + if !bytes.Equal(src[cursor:cursor+4], []byte(`true`)) { + return nil, 0, errors.ErrInvalidCharacter(src[cursor], "true", cursor) + } + dst = append(dst, "true"...) + cursor += 4 + return dst, cursor, nil +} + +func compactFalse(dst, src []byte, cursor int64) ([]byte, int64, error) { + if cursor+4 >= int64(len(src)) { + return nil, 0, errors.ErrUnexpectedEndOfJSON("false", cursor) + } + if !bytes.Equal(src[cursor:cursor+5], []byte(`false`)) { + return nil, 0, errors.ErrInvalidCharacter(src[cursor], "false", cursor) + } + dst = append(dst, "false"...) + cursor += 5 + return dst, cursor, nil +} + +func compactNull(dst, src []byte, cursor int64) ([]byte, int64, error) { + if cursor+3 >= int64(len(src)) { + return nil, 0, errors.ErrUnexpectedEndOfJSON("null", cursor) + } + if !bytes.Equal(src[cursor:cursor+4], []byte(`null`)) { + return nil, 0, errors.ErrInvalidCharacter(src[cursor], "null", cursor) + } + dst = append(dst, "null"...) + cursor += 4 + return dst, cursor, nil +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/compiler.go b/vendor/github.com/goccy/go-json/internal/encoder/compiler.go new file mode 100644 index 0000000000000..37b7aa38e26a1 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/compiler.go @@ -0,0 +1,935 @@ +package encoder + +import ( + "context" + "encoding" + "encoding/json" + "reflect" + "sync/atomic" + "unsafe" + + "github.com/goccy/go-json/internal/errors" + "github.com/goccy/go-json/internal/runtime" +) + +type marshalerContext interface { + MarshalJSON(context.Context) ([]byte, error) +} + +var ( + marshalJSONType = reflect.TypeOf((*json.Marshaler)(nil)).Elem() + marshalJSONContextType = reflect.TypeOf((*marshalerContext)(nil)).Elem() + marshalTextType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() + jsonNumberType = reflect.TypeOf(json.Number("")) + cachedOpcodeSets []*OpcodeSet + cachedOpcodeMap unsafe.Pointer // map[uintptr]*OpcodeSet + typeAddr *runtime.TypeAddr +) + +func init() { + typeAddr = runtime.AnalyzeTypeAddr() + if typeAddr == nil { + typeAddr = &runtime.TypeAddr{} + } + cachedOpcodeSets = make([]*OpcodeSet, typeAddr.AddrRange>>typeAddr.AddrShift+1) +} + +func loadOpcodeMap() map[uintptr]*OpcodeSet { + p := atomic.LoadPointer(&cachedOpcodeMap) + return *(*map[uintptr]*OpcodeSet)(unsafe.Pointer(&p)) +} + +func storeOpcodeSet(typ uintptr, set *OpcodeSet, m map[uintptr]*OpcodeSet) { + newOpcodeMap := make(map[uintptr]*OpcodeSet, len(m)+1) + newOpcodeMap[typ] = set + + for k, v := range m { + newOpcodeMap[k] = v + } + + atomic.StorePointer(&cachedOpcodeMap, *(*unsafe.Pointer)(unsafe.Pointer(&newOpcodeMap))) +} + +func compileToGetCodeSetSlowPath(typeptr uintptr) (*OpcodeSet, error) { + opcodeMap := loadOpcodeMap() + if codeSet, exists := opcodeMap[typeptr]; exists { + return codeSet, nil + } + codeSet, err := newCompiler().compile(typeptr) + if err != nil { + return nil, err + } + storeOpcodeSet(typeptr, codeSet, opcodeMap) + return codeSet, nil +} + +func getFilteredCodeSetIfNeeded(ctx *RuntimeContext, codeSet *OpcodeSet) (*OpcodeSet, error) { + if (ctx.Option.Flag & ContextOption) == 0 { + return codeSet, nil + } + query := FieldQueryFromContext(ctx.Option.Context) + if query == nil { + return codeSet, nil + } + ctx.Option.Flag |= FieldQueryOption + cacheCodeSet := codeSet.getQueryCache(query.Hash()) + if 
cacheCodeSet != nil { + return cacheCodeSet, nil + } + queryCodeSet, err := newCompiler().codeToOpcodeSet(codeSet.Type, codeSet.Code.Filter(query)) + if err != nil { + return nil, err + } + codeSet.setQueryCache(query.Hash(), queryCodeSet) + return queryCodeSet, nil +} + +type Compiler struct { + structTypeToCode map[uintptr]*StructCode +} + +func newCompiler() *Compiler { + return &Compiler{ + structTypeToCode: map[uintptr]*StructCode{}, + } +} + +func (c *Compiler) compile(typeptr uintptr) (*OpcodeSet, error) { + // noescape trick for header.typ ( reflect.*rtype ) + typ := *(**runtime.Type)(unsafe.Pointer(&typeptr)) + code, err := c.typeToCode(typ) + if err != nil { + return nil, err + } + return c.codeToOpcodeSet(typ, code) +} + +func (c *Compiler) codeToOpcodeSet(typ *runtime.Type, code Code) (*OpcodeSet, error) { + noescapeKeyCode := c.codeToOpcode(&compileContext{ + structTypeToCodes: map[uintptr]Opcodes{}, + recursiveCodes: &Opcodes{}, + }, typ, code) + if err := noescapeKeyCode.Validate(); err != nil { + return nil, err + } + escapeKeyCode := c.codeToOpcode(&compileContext{ + structTypeToCodes: map[uintptr]Opcodes{}, + recursiveCodes: &Opcodes{}, + escapeKey: true, + }, typ, code) + noescapeKeyCode = copyOpcode(noescapeKeyCode) + escapeKeyCode = copyOpcode(escapeKeyCode) + setTotalLengthToInterfaceOp(noescapeKeyCode) + setTotalLengthToInterfaceOp(escapeKeyCode) + interfaceNoescapeKeyCode := copyToInterfaceOpcode(noescapeKeyCode) + interfaceEscapeKeyCode := copyToInterfaceOpcode(escapeKeyCode) + codeLength := noescapeKeyCode.TotalLength() + return &OpcodeSet{ + Type: typ, + NoescapeKeyCode: noescapeKeyCode, + EscapeKeyCode: escapeKeyCode, + InterfaceNoescapeKeyCode: interfaceNoescapeKeyCode, + InterfaceEscapeKeyCode: interfaceEscapeKeyCode, + CodeLength: codeLength, + EndCode: ToEndCode(interfaceNoescapeKeyCode), + Code: code, + QueryCache: map[string]*OpcodeSet{}, + }, nil +} + +func (c *Compiler) typeToCode(typ *runtime.Type) (Code, error) { + switch { + case c.implementsMarshalJSON(typ): + return c.marshalJSONCode(typ) + case c.implementsMarshalText(typ): + return c.marshalTextCode(typ) + } + + isPtr := false + orgType := typ + if typ.Kind() == reflect.Ptr { + typ = typ.Elem() + isPtr = true + } + switch { + case c.implementsMarshalJSON(typ): + return c.marshalJSONCode(orgType) + case c.implementsMarshalText(typ): + return c.marshalTextCode(orgType) + } + switch typ.Kind() { + case reflect.Slice: + elem := typ.Elem() + if elem.Kind() == reflect.Uint8 { + p := runtime.PtrTo(elem) + if !c.implementsMarshalJSONType(p) && !p.Implements(marshalTextType) { + return c.bytesCode(typ, isPtr) + } + } + return c.sliceCode(typ) + case reflect.Map: + if isPtr { + return c.ptrCode(runtime.PtrTo(typ)) + } + return c.mapCode(typ) + case reflect.Struct: + return c.structCode(typ, isPtr) + case reflect.Int: + return c.intCode(typ, isPtr) + case reflect.Int8: + return c.int8Code(typ, isPtr) + case reflect.Int16: + return c.int16Code(typ, isPtr) + case reflect.Int32: + return c.int32Code(typ, isPtr) + case reflect.Int64: + return c.int64Code(typ, isPtr) + case reflect.Uint, reflect.Uintptr: + return c.uintCode(typ, isPtr) + case reflect.Uint8: + return c.uint8Code(typ, isPtr) + case reflect.Uint16: + return c.uint16Code(typ, isPtr) + case reflect.Uint32: + return c.uint32Code(typ, isPtr) + case reflect.Uint64: + return c.uint64Code(typ, isPtr) + case reflect.Float32: + return c.float32Code(typ, isPtr) + case reflect.Float64: + return c.float64Code(typ, isPtr) + case reflect.String: + return 
c.stringCode(typ, isPtr) + case reflect.Bool: + return c.boolCode(typ, isPtr) + case reflect.Interface: + return c.interfaceCode(typ, isPtr) + default: + if isPtr && typ.Implements(marshalTextType) { + typ = orgType + } + return c.typeToCodeWithPtr(typ, isPtr) + } +} + +func (c *Compiler) typeToCodeWithPtr(typ *runtime.Type, isPtr bool) (Code, error) { + switch { + case c.implementsMarshalJSON(typ): + return c.marshalJSONCode(typ) + case c.implementsMarshalText(typ): + return c.marshalTextCode(typ) + } + switch typ.Kind() { + case reflect.Ptr: + return c.ptrCode(typ) + case reflect.Slice: + elem := typ.Elem() + if elem.Kind() == reflect.Uint8 { + p := runtime.PtrTo(elem) + if !c.implementsMarshalJSONType(p) && !p.Implements(marshalTextType) { + return c.bytesCode(typ, false) + } + } + return c.sliceCode(typ) + case reflect.Array: + return c.arrayCode(typ) + case reflect.Map: + return c.mapCode(typ) + case reflect.Struct: + return c.structCode(typ, isPtr) + case reflect.Interface: + return c.interfaceCode(typ, false) + case reflect.Int: + return c.intCode(typ, false) + case reflect.Int8: + return c.int8Code(typ, false) + case reflect.Int16: + return c.int16Code(typ, false) + case reflect.Int32: + return c.int32Code(typ, false) + case reflect.Int64: + return c.int64Code(typ, false) + case reflect.Uint: + return c.uintCode(typ, false) + case reflect.Uint8: + return c.uint8Code(typ, false) + case reflect.Uint16: + return c.uint16Code(typ, false) + case reflect.Uint32: + return c.uint32Code(typ, false) + case reflect.Uint64: + return c.uint64Code(typ, false) + case reflect.Uintptr: + return c.uintCode(typ, false) + case reflect.Float32: + return c.float32Code(typ, false) + case reflect.Float64: + return c.float64Code(typ, false) + case reflect.String: + return c.stringCode(typ, false) + case reflect.Bool: + return c.boolCode(typ, false) + } + return nil, &errors.UnsupportedTypeError{Type: runtime.RType2Type(typ)} +} + +const intSize = 32 << (^uint(0) >> 63) + +//nolint:unparam +func (c *Compiler) intCode(typ *runtime.Type, isPtr bool) (*IntCode, error) { + return &IntCode{typ: typ, bitSize: intSize, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) int8Code(typ *runtime.Type, isPtr bool) (*IntCode, error) { + return &IntCode{typ: typ, bitSize: 8, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) int16Code(typ *runtime.Type, isPtr bool) (*IntCode, error) { + return &IntCode{typ: typ, bitSize: 16, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) int32Code(typ *runtime.Type, isPtr bool) (*IntCode, error) { + return &IntCode{typ: typ, bitSize: 32, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) int64Code(typ *runtime.Type, isPtr bool) (*IntCode, error) { + return &IntCode{typ: typ, bitSize: 64, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) uintCode(typ *runtime.Type, isPtr bool) (*UintCode, error) { + return &UintCode{typ: typ, bitSize: intSize, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) uint8Code(typ *runtime.Type, isPtr bool) (*UintCode, error) { + return &UintCode{typ: typ, bitSize: 8, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) uint16Code(typ *runtime.Type, isPtr bool) (*UintCode, error) { + return &UintCode{typ: typ, bitSize: 16, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) uint32Code(typ *runtime.Type, isPtr bool) (*UintCode, error) { + return &UintCode{typ: typ, bitSize: 32, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) uint64Code(typ *runtime.Type, isPtr 
bool) (*UintCode, error) { + return &UintCode{typ: typ, bitSize: 64, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) float32Code(typ *runtime.Type, isPtr bool) (*FloatCode, error) { + return &FloatCode{typ: typ, bitSize: 32, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) float64Code(typ *runtime.Type, isPtr bool) (*FloatCode, error) { + return &FloatCode{typ: typ, bitSize: 64, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) stringCode(typ *runtime.Type, isPtr bool) (*StringCode, error) { + return &StringCode{typ: typ, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) boolCode(typ *runtime.Type, isPtr bool) (*BoolCode, error) { + return &BoolCode{typ: typ, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) intStringCode(typ *runtime.Type) (*IntCode, error) { + return &IntCode{typ: typ, bitSize: intSize, isString: true}, nil +} + +//nolint:unparam +func (c *Compiler) int8StringCode(typ *runtime.Type) (*IntCode, error) { + return &IntCode{typ: typ, bitSize: 8, isString: true}, nil +} + +//nolint:unparam +func (c *Compiler) int16StringCode(typ *runtime.Type) (*IntCode, error) { + return &IntCode{typ: typ, bitSize: 16, isString: true}, nil +} + +//nolint:unparam +func (c *Compiler) int32StringCode(typ *runtime.Type) (*IntCode, error) { + return &IntCode{typ: typ, bitSize: 32, isString: true}, nil +} + +//nolint:unparam +func (c *Compiler) int64StringCode(typ *runtime.Type) (*IntCode, error) { + return &IntCode{typ: typ, bitSize: 64, isString: true}, nil +} + +//nolint:unparam +func (c *Compiler) uintStringCode(typ *runtime.Type) (*UintCode, error) { + return &UintCode{typ: typ, bitSize: intSize, isString: true}, nil +} + +//nolint:unparam +func (c *Compiler) uint8StringCode(typ *runtime.Type) (*UintCode, error) { + return &UintCode{typ: typ, bitSize: 8, isString: true}, nil +} + +//nolint:unparam +func (c *Compiler) uint16StringCode(typ *runtime.Type) (*UintCode, error) { + return &UintCode{typ: typ, bitSize: 16, isString: true}, nil +} + +//nolint:unparam +func (c *Compiler) uint32StringCode(typ *runtime.Type) (*UintCode, error) { + return &UintCode{typ: typ, bitSize: 32, isString: true}, nil +} + +//nolint:unparam +func (c *Compiler) uint64StringCode(typ *runtime.Type) (*UintCode, error) { + return &UintCode{typ: typ, bitSize: 64, isString: true}, nil +} + +//nolint:unparam +func (c *Compiler) bytesCode(typ *runtime.Type, isPtr bool) (*BytesCode, error) { + return &BytesCode{typ: typ, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) interfaceCode(typ *runtime.Type, isPtr bool) (*InterfaceCode, error) { + return &InterfaceCode{typ: typ, isPtr: isPtr}, nil +} + +//nolint:unparam +func (c *Compiler) marshalJSONCode(typ *runtime.Type) (*MarshalJSONCode, error) { + return &MarshalJSONCode{ + typ: typ, + isAddrForMarshaler: c.isPtrMarshalJSONType(typ), + isNilableType: c.isNilableType(typ), + isMarshalerContext: typ.Implements(marshalJSONContextType) || runtime.PtrTo(typ).Implements(marshalJSONContextType), + }, nil +} + +//nolint:unparam +func (c *Compiler) marshalTextCode(typ *runtime.Type) (*MarshalTextCode, error) { + return &MarshalTextCode{ + typ: typ, + isAddrForMarshaler: c.isPtrMarshalTextType(typ), + isNilableType: c.isNilableType(typ), + }, nil +} + +func (c *Compiler) ptrCode(typ *runtime.Type) (*PtrCode, error) { + code, err := c.typeToCodeWithPtr(typ.Elem(), true) + if err != nil { + return nil, err + } + ptr, ok := code.(*PtrCode) + if ok { + return &PtrCode{typ: typ, value: ptr.value, ptrNum: ptr.ptrNum + 
1}, nil + } + return &PtrCode{typ: typ, value: code, ptrNum: 1}, nil +} + +func (c *Compiler) sliceCode(typ *runtime.Type) (*SliceCode, error) { + elem := typ.Elem() + code, err := c.listElemCode(elem) + if err != nil { + return nil, err + } + if code.Kind() == CodeKindStruct { + structCode := code.(*StructCode) + structCode.enableIndirect() + } + return &SliceCode{typ: typ, value: code}, nil +} + +func (c *Compiler) arrayCode(typ *runtime.Type) (*ArrayCode, error) { + elem := typ.Elem() + code, err := c.listElemCode(elem) + if err != nil { + return nil, err + } + if code.Kind() == CodeKindStruct { + structCode := code.(*StructCode) + structCode.enableIndirect() + } + return &ArrayCode{typ: typ, value: code}, nil +} + +func (c *Compiler) mapCode(typ *runtime.Type) (*MapCode, error) { + keyCode, err := c.mapKeyCode(typ.Key()) + if err != nil { + return nil, err + } + valueCode, err := c.mapValueCode(typ.Elem()) + if err != nil { + return nil, err + } + if valueCode.Kind() == CodeKindStruct { + structCode := valueCode.(*StructCode) + structCode.enableIndirect() + } + return &MapCode{typ: typ, key: keyCode, value: valueCode}, nil +} + +func (c *Compiler) listElemCode(typ *runtime.Type) (Code, error) { + switch { + case c.implementsMarshalJSONType(typ) || c.implementsMarshalJSONType(runtime.PtrTo(typ)): + return c.marshalJSONCode(typ) + case !typ.Implements(marshalTextType) && runtime.PtrTo(typ).Implements(marshalTextType): + return c.marshalTextCode(typ) + case typ.Kind() == reflect.Map: + return c.ptrCode(runtime.PtrTo(typ)) + default: + // isPtr was originally used to indicate whether the type of top level is pointer. + // However, since the slice/array element is a specification that can get the pointer address, explicitly set isPtr to true. + // See here for related issues: https://github.com/goccy/go-json/issues/370 + code, err := c.typeToCodeWithPtr(typ, true) + if err != nil { + return nil, err + } + ptr, ok := code.(*PtrCode) + if ok { + if ptr.value.Kind() == CodeKindMap { + ptr.ptrNum++ + } + } + return code, nil + } +} + +func (c *Compiler) mapKeyCode(typ *runtime.Type) (Code, error) { + switch { + case c.implementsMarshalText(typ): + return c.marshalTextCode(typ) + } + switch typ.Kind() { + case reflect.Ptr: + return c.ptrCode(typ) + case reflect.String: + return c.stringCode(typ, false) + case reflect.Int: + return c.intStringCode(typ) + case reflect.Int8: + return c.int8StringCode(typ) + case reflect.Int16: + return c.int16StringCode(typ) + case reflect.Int32: + return c.int32StringCode(typ) + case reflect.Int64: + return c.int64StringCode(typ) + case reflect.Uint: + return c.uintStringCode(typ) + case reflect.Uint8: + return c.uint8StringCode(typ) + case reflect.Uint16: + return c.uint16StringCode(typ) + case reflect.Uint32: + return c.uint32StringCode(typ) + case reflect.Uint64: + return c.uint64StringCode(typ) + case reflect.Uintptr: + return c.uintStringCode(typ) + } + return nil, &errors.UnsupportedTypeError{Type: runtime.RType2Type(typ)} +} + +func (c *Compiler) mapValueCode(typ *runtime.Type) (Code, error) { + switch typ.Kind() { + case reflect.Map: + return c.ptrCode(runtime.PtrTo(typ)) + default: + code, err := c.typeToCodeWithPtr(typ, false) + if err != nil { + return nil, err + } + ptr, ok := code.(*PtrCode) + if ok { + if ptr.value.Kind() == CodeKindMap { + ptr.ptrNum++ + } + } + return code, nil + } +} + +func (c *Compiler) structCode(typ *runtime.Type, isPtr bool) (*StructCode, error) { + typeptr := uintptr(unsafe.Pointer(typ)) + if code, exists := 
c.structTypeToCode[typeptr]; exists { + derefCode := *code + derefCode.isRecursive = true + return &derefCode, nil + } + indirect := runtime.IfaceIndir(typ) + code := &StructCode{typ: typ, isPtr: isPtr, isIndirect: indirect} + c.structTypeToCode[typeptr] = code + + fieldNum := typ.NumField() + tags := c.typeToStructTags(typ) + fields := []*StructFieldCode{} + for i, tag := range tags { + isOnlyOneFirstField := i == 0 && fieldNum == 1 + field, err := c.structFieldCode(code, tag, isPtr, isOnlyOneFirstField) + if err != nil { + return nil, err + } + if field.isAnonymous { + structCode := field.getAnonymousStruct() + if structCode != nil { + structCode.removeFieldsByTags(tags) + if c.isAssignableIndirect(field, isPtr) { + if indirect { + structCode.isIndirect = true + } else { + structCode.isIndirect = false + } + } + } + } else { + structCode := field.getStruct() + if structCode != nil { + if indirect { + // if parent is indirect type, set child indirect property to true + structCode.isIndirect = true + } else { + // if parent is not indirect type, set child indirect property to false. + // but if parent's indirect is false and isPtr is true, then indirect must be true. + // Do this only if indirectConversion is enabled at the end of compileStruct. + structCode.isIndirect = false + } + } + } + fields = append(fields, field) + } + fieldMap := c.getFieldMap(fields) + duplicatedFieldMap := c.getDuplicatedFieldMap(fieldMap) + code.fields = c.filteredDuplicatedFields(fields, duplicatedFieldMap) + if !code.disableIndirectConversion && !indirect && isPtr { + code.enableIndirect() + } + delete(c.structTypeToCode, typeptr) + return code, nil +} + +func toElemType(t *runtime.Type) *runtime.Type { + for t.Kind() == reflect.Ptr { + t = t.Elem() + } + return t +} + +func (c *Compiler) structFieldCode(structCode *StructCode, tag *runtime.StructTag, isPtr, isOnlyOneFirstField bool) (*StructFieldCode, error) { + field := tag.Field + fieldType := runtime.Type2RType(field.Type) + isIndirectSpecialCase := isPtr && isOnlyOneFirstField + fieldCode := &StructFieldCode{ + typ: fieldType, + key: tag.Key, + tag: tag, + offset: field.Offset, + isAnonymous: field.Anonymous && !tag.IsTaggedKey && toElemType(fieldType).Kind() == reflect.Struct, + isTaggedKey: tag.IsTaggedKey, + isNilableType: c.isNilableType(fieldType), + isNilCheck: true, + } + switch { + case c.isMovePointerPositionFromHeadToFirstMarshalJSONFieldCase(fieldType, isIndirectSpecialCase): + code, err := c.marshalJSONCode(fieldType) + if err != nil { + return nil, err + } + fieldCode.value = code + fieldCode.isAddrForMarshaler = true + fieldCode.isNilCheck = false + structCode.isIndirect = false + structCode.disableIndirectConversion = true + case c.isMovePointerPositionFromHeadToFirstMarshalTextFieldCase(fieldType, isIndirectSpecialCase): + code, err := c.marshalTextCode(fieldType) + if err != nil { + return nil, err + } + fieldCode.value = code + fieldCode.isAddrForMarshaler = true + fieldCode.isNilCheck = false + structCode.isIndirect = false + structCode.disableIndirectConversion = true + case isPtr && c.isPtrMarshalJSONType(fieldType): + // *struct{ field T } + // func (*T) MarshalJSON() ([]byte, error) + code, err := c.marshalJSONCode(fieldType) + if err != nil { + return nil, err + } + fieldCode.value = code + fieldCode.isAddrForMarshaler = true + fieldCode.isNilCheck = false + case isPtr && c.isPtrMarshalTextType(fieldType): + // *struct{ field T } + // func (*T) MarshalText() ([]byte, error) + code, err := c.marshalTextCode(fieldType) + if err != 
nil { + return nil, err + } + fieldCode.value = code + fieldCode.isAddrForMarshaler = true + fieldCode.isNilCheck = false + default: + code, err := c.typeToCodeWithPtr(fieldType, isPtr) + if err != nil { + return nil, err + } + switch code.Kind() { + case CodeKindPtr, CodeKindInterface: + fieldCode.isNextOpPtrType = true + } + fieldCode.value = code + } + return fieldCode, nil +} + +func (c *Compiler) isAssignableIndirect(fieldCode *StructFieldCode, isPtr bool) bool { + if isPtr { + return false + } + codeType := fieldCode.value.Kind() + if codeType == CodeKindMarshalJSON { + return false + } + if codeType == CodeKindMarshalText { + return false + } + return true +} + +func (c *Compiler) getFieldMap(fields []*StructFieldCode) map[string][]*StructFieldCode { + fieldMap := map[string][]*StructFieldCode{} + for _, field := range fields { + if field.isAnonymous { + for k, v := range c.getAnonymousFieldMap(field) { + fieldMap[k] = append(fieldMap[k], v...) + } + continue + } + fieldMap[field.key] = append(fieldMap[field.key], field) + } + return fieldMap +} + +func (c *Compiler) getAnonymousFieldMap(field *StructFieldCode) map[string][]*StructFieldCode { + fieldMap := map[string][]*StructFieldCode{} + structCode := field.getAnonymousStruct() + if structCode == nil || structCode.isRecursive { + fieldMap[field.key] = append(fieldMap[field.key], field) + return fieldMap + } + for k, v := range c.getFieldMapFromAnonymousParent(structCode.fields) { + fieldMap[k] = append(fieldMap[k], v...) + } + return fieldMap +} + +func (c *Compiler) getFieldMapFromAnonymousParent(fields []*StructFieldCode) map[string][]*StructFieldCode { + fieldMap := map[string][]*StructFieldCode{} + for _, field := range fields { + if field.isAnonymous { + for k, v := range c.getAnonymousFieldMap(field) { + // Do not handle tagged key when embedding more than once + for _, vv := range v { + vv.isTaggedKey = false + } + fieldMap[k] = append(fieldMap[k], v...) 
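+				// clearing isTaggedKey above means a field promoted through two or more
+				// levels of embedding cannot win a name conflict on its tag alone, so a
+				// field explicitly tagged at a shallower embedding level wins during
+				// duplicate filtering in getDuplicatedFieldMap.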
+ } + continue + } + fieldMap[field.key] = append(fieldMap[field.key], field) + } + return fieldMap +} + +func (c *Compiler) getDuplicatedFieldMap(fieldMap map[string][]*StructFieldCode) map[*StructFieldCode]struct{} { + duplicatedFieldMap := map[*StructFieldCode]struct{}{} + for _, fields := range fieldMap { + if len(fields) == 1 { + continue + } + if c.isTaggedKeyOnly(fields) { + for _, field := range fields { + if field.isTaggedKey { + continue + } + duplicatedFieldMap[field] = struct{}{} + } + } else { + for _, field := range fields { + duplicatedFieldMap[field] = struct{}{} + } + } + } + return duplicatedFieldMap +} + +func (c *Compiler) filteredDuplicatedFields(fields []*StructFieldCode, duplicatedFieldMap map[*StructFieldCode]struct{}) []*StructFieldCode { + filteredFields := make([]*StructFieldCode, 0, len(fields)) + for _, field := range fields { + if field.isAnonymous { + structCode := field.getAnonymousStruct() + if structCode != nil && !structCode.isRecursive { + structCode.fields = c.filteredDuplicatedFields(structCode.fields, duplicatedFieldMap) + if len(structCode.fields) > 0 { + filteredFields = append(filteredFields, field) + } + continue + } + } + if _, exists := duplicatedFieldMap[field]; exists { + continue + } + filteredFields = append(filteredFields, field) + } + return filteredFields +} + +func (c *Compiler) isTaggedKeyOnly(fields []*StructFieldCode) bool { + var taggedKeyFieldCount int + for _, field := range fields { + if field.isTaggedKey { + taggedKeyFieldCount++ + } + } + return taggedKeyFieldCount == 1 +} + +func (c *Compiler) typeToStructTags(typ *runtime.Type) runtime.StructTags { + tags := runtime.StructTags{} + fieldNum := typ.NumField() + for i := 0; i < fieldNum; i++ { + field := typ.Field(i) + if runtime.IsIgnoredStructField(field) { + continue + } + tags = append(tags, runtime.StructTagFromField(field)) + } + return tags +} + +// *struct{ field T } => struct { field *T } +// func (*T) MarshalJSON() ([]byte, error) +func (c *Compiler) isMovePointerPositionFromHeadToFirstMarshalJSONFieldCase(typ *runtime.Type, isIndirectSpecialCase bool) bool { + return isIndirectSpecialCase && !c.isNilableType(typ) && c.isPtrMarshalJSONType(typ) +} + +// *struct{ field T } => struct { field *T } +// func (*T) MarshalText() ([]byte, error) +func (c *Compiler) isMovePointerPositionFromHeadToFirstMarshalTextFieldCase(typ *runtime.Type, isIndirectSpecialCase bool) bool { + return isIndirectSpecialCase && !c.isNilableType(typ) && c.isPtrMarshalTextType(typ) +} + +func (c *Compiler) implementsMarshalJSON(typ *runtime.Type) bool { + if !c.implementsMarshalJSONType(typ) { + return false + } + if typ.Kind() != reflect.Ptr { + return true + } + // type kind is reflect.Ptr + if !c.implementsMarshalJSONType(typ.Elem()) { + return true + } + // needs to dereference + return false +} + +func (c *Compiler) implementsMarshalText(typ *runtime.Type) bool { + if !typ.Implements(marshalTextType) { + return false + } + if typ.Kind() != reflect.Ptr { + return true + } + // type kind is reflect.Ptr + if !typ.Elem().Implements(marshalTextType) { + return true + } + // needs to dereference + return false +} + +func (c *Compiler) isNilableType(typ *runtime.Type) bool { + if !runtime.IfaceIndir(typ) { + return true + } + switch typ.Kind() { + case reflect.Ptr: + return true + case reflect.Map: + return true + case reflect.Func: + return true + default: + return false + } +} + +func (c *Compiler) implementsMarshalJSONType(typ *runtime.Type) bool { + return typ.Implements(marshalJSONType) || 
typ.Implements(marshalJSONContextType) +} + +func (c *Compiler) isPtrMarshalJSONType(typ *runtime.Type) bool { + return !c.implementsMarshalJSONType(typ) && c.implementsMarshalJSONType(runtime.PtrTo(typ)) +} + +func (c *Compiler) isPtrMarshalTextType(typ *runtime.Type) bool { + return !typ.Implements(marshalTextType) && runtime.PtrTo(typ).Implements(marshalTextType) +} + +func (c *Compiler) codeToOpcode(ctx *compileContext, typ *runtime.Type, code Code) *Opcode { + codes := code.ToOpcode(ctx) + codes.Last().Next = newEndOp(ctx, typ) + c.linkRecursiveCode(ctx) + return codes.First() +} + +func (c *Compiler) linkRecursiveCode(ctx *compileContext) { + recursiveCodes := map[uintptr]*CompiledCode{} + for _, recursive := range *ctx.recursiveCodes { + typeptr := uintptr(unsafe.Pointer(recursive.Type)) + codes := ctx.structTypeToCodes[typeptr] + if recursiveCode, ok := recursiveCodes[typeptr]; ok { + *recursive.Jmp = *recursiveCode + continue + } + + code := copyOpcode(codes.First()) + code.Op = code.Op.PtrHeadToHead() + lastCode := newEndOp(&compileContext{}, recursive.Type) + lastCode.Op = OpRecursiveEnd + + // OpRecursiveEnd must set before call TotalLength + code.End.Next = lastCode + + totalLength := code.TotalLength() + + // Idx, ElemIdx, Length must set after call TotalLength + lastCode.Idx = uint32((totalLength + 1) * uintptrSize) + lastCode.ElemIdx = lastCode.Idx + uintptrSize + lastCode.Length = lastCode.Idx + 2*uintptrSize + + // extend length to alloc slot for elemIdx + length + curTotalLength := uintptr(recursive.TotalLength()) + 3 + nextTotalLength := uintptr(totalLength) + 3 + + compiled := recursive.Jmp + compiled.Code = code + compiled.CurLen = curTotalLength + compiled.NextLen = nextTotalLength + compiled.Linked = true + + recursiveCodes[typeptr] = compiled + } +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/compiler_norace.go b/vendor/github.com/goccy/go-json/internal/encoder/compiler_norace.go new file mode 100644 index 0000000000000..20c93cbf70988 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/compiler_norace.go @@ -0,0 +1,32 @@ +//go:build !race +// +build !race + +package encoder + +func CompileToGetCodeSet(ctx *RuntimeContext, typeptr uintptr) (*OpcodeSet, error) { + if typeptr > typeAddr.MaxTypeAddr || typeptr < typeAddr.BaseTypeAddr { + codeSet, err := compileToGetCodeSetSlowPath(typeptr) + if err != nil { + return nil, err + } + return getFilteredCodeSetIfNeeded(ctx, codeSet) + } + index := (typeptr - typeAddr.BaseTypeAddr) >> typeAddr.AddrShift + if codeSet := cachedOpcodeSets[index]; codeSet != nil { + filtered, err := getFilteredCodeSetIfNeeded(ctx, codeSet) + if err != nil { + return nil, err + } + return filtered, nil + } + codeSet, err := newCompiler().compile(typeptr) + if err != nil { + return nil, err + } + filtered, err := getFilteredCodeSetIfNeeded(ctx, codeSet) + if err != nil { + return nil, err + } + cachedOpcodeSets[index] = codeSet + return filtered, nil +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/compiler_race.go b/vendor/github.com/goccy/go-json/internal/encoder/compiler_race.go new file mode 100644 index 0000000000000..13ba23fdff8e5 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/compiler_race.go @@ -0,0 +1,45 @@ +//go:build race +// +build race + +package encoder + +import ( + "sync" +) + +var setsMu sync.RWMutex + +func CompileToGetCodeSet(ctx *RuntimeContext, typeptr uintptr) (*OpcodeSet, error) { + if typeptr > typeAddr.MaxTypeAddr || typeptr < typeAddr.BaseTypeAddr { 
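+		// type addresses outside the precomputed range cannot be indexed into the
+		// cachedOpcodeSets slice, so fall back to the copy-on-write map cache used
+		// by compileToGetCodeSetSlowPath.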
+ codeSet, err := compileToGetCodeSetSlowPath(typeptr) + if err != nil { + return nil, err + } + return getFilteredCodeSetIfNeeded(ctx, codeSet) + } + index := (typeptr - typeAddr.BaseTypeAddr) >> typeAddr.AddrShift + setsMu.RLock() + if codeSet := cachedOpcodeSets[index]; codeSet != nil { + filtered, err := getFilteredCodeSetIfNeeded(ctx, codeSet) + if err != nil { + setsMu.RUnlock() + return nil, err + } + setsMu.RUnlock() + return filtered, nil + } + setsMu.RUnlock() + + codeSet, err := newCompiler().compile(typeptr) + if err != nil { + return nil, err + } + filtered, err := getFilteredCodeSetIfNeeded(ctx, codeSet) + if err != nil { + return nil, err + } + setsMu.Lock() + cachedOpcodeSets[index] = codeSet + setsMu.Unlock() + return filtered, nil +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/context.go b/vendor/github.com/goccy/go-json/internal/encoder/context.go new file mode 100644 index 0000000000000..3833d0c86db5f --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/context.go @@ -0,0 +1,105 @@ +package encoder + +import ( + "context" + "sync" + "unsafe" + + "github.com/goccy/go-json/internal/runtime" +) + +type compileContext struct { + opcodeIndex uint32 + ptrIndex int + indent uint32 + escapeKey bool + structTypeToCodes map[uintptr]Opcodes + recursiveCodes *Opcodes +} + +func (c *compileContext) incIndent() { + c.indent++ +} + +func (c *compileContext) decIndent() { + c.indent-- +} + +func (c *compileContext) incIndex() { + c.incOpcodeIndex() + c.incPtrIndex() +} + +func (c *compileContext) decIndex() { + c.decOpcodeIndex() + c.decPtrIndex() +} + +func (c *compileContext) incOpcodeIndex() { + c.opcodeIndex++ +} + +func (c *compileContext) decOpcodeIndex() { + c.opcodeIndex-- +} + +func (c *compileContext) incPtrIndex() { + c.ptrIndex++ +} + +func (c *compileContext) decPtrIndex() { + c.ptrIndex-- +} + +const ( + bufSize = 1024 +) + +var ( + runtimeContextPool = sync.Pool{ + New: func() interface{} { + return &RuntimeContext{ + Buf: make([]byte, 0, bufSize), + Ptrs: make([]uintptr, 128), + KeepRefs: make([]unsafe.Pointer, 0, 8), + Option: &Option{}, + } + }, + } +) + +type RuntimeContext struct { + Context context.Context + Buf []byte + MarshalBuf []byte + Ptrs []uintptr + KeepRefs []unsafe.Pointer + SeenPtr []uintptr + BaseIndent uint32 + Prefix []byte + IndentStr []byte + Option *Option +} + +func (c *RuntimeContext) Init(p uintptr, codelen int) { + if len(c.Ptrs) < codelen { + c.Ptrs = make([]uintptr, codelen) + } + c.Ptrs[0] = p + c.KeepRefs = c.KeepRefs[:0] + c.SeenPtr = c.SeenPtr[:0] + c.BaseIndent = 0 +} + +func (c *RuntimeContext) Ptr() uintptr { + header := (*runtime.SliceHeader)(unsafe.Pointer(&c.Ptrs)) + return uintptr(header.Data) +} + +func TakeRuntimeContext() *RuntimeContext { + return runtimeContextPool.Get().(*RuntimeContext) +} + +func ReleaseRuntimeContext(ctx *RuntimeContext) { + runtimeContextPool.Put(ctx) +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/decode_rune.go b/vendor/github.com/goccy/go-json/internal/encoder/decode_rune.go new file mode 100644 index 0000000000000..35c959d481857 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/decode_rune.go @@ -0,0 +1,126 @@ +package encoder + +import "unicode/utf8" + +const ( + // The default lowest and highest continuation byte. + locb = 128 //0b10000000 + hicb = 191 //0b10111111 + + // These names of these constants are chosen to give nice alignment in the + // table below. 
The first nibble is an index into acceptRanges or F for + // special one-byte cases. The second nibble is the Rune length or the + // Status for the special one-byte case. + xx = 0xF1 // invalid: size 1 + as = 0xF0 // ASCII: size 1 + s1 = 0x02 // accept 0, size 2 + s2 = 0x13 // accept 1, size 3 + s3 = 0x03 // accept 0, size 3 + s4 = 0x23 // accept 2, size 3 + s5 = 0x34 // accept 3, size 4 + s6 = 0x04 // accept 0, size 4 + s7 = 0x44 // accept 4, size 4 +) + +// first is information about the first byte in a UTF-8 sequence. +var first = [256]uint8{ + // 1 2 3 4 5 6 7 8 9 A B C D E F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x00-0x0F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x10-0x1F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x20-0x2F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x30-0x3F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x40-0x4F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x50-0x5F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x60-0x6F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x70-0x7F + // 1 2 3 4 5 6 7 8 9 A B C D E F + xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x80-0x8F + xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x90-0x9F + xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xA0-0xAF + xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xB0-0xBF + xx, xx, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xC0-0xCF + s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xD0-0xDF + s2, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s4, s3, s3, // 0xE0-0xEF + s5, s6, s6, s6, s7, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xF0-0xFF +} + +const ( + lineSep = byte(168) //'\u2028' + paragraphSep = byte(169) //'\u2029' +) + +type decodeRuneState int + +const ( + validUTF8State decodeRuneState = iota + runeErrorState + lineSepState + paragraphSepState +) + +func decodeRuneInString(s string) (decodeRuneState, int) { + n := len(s) + s0 := s[0] + x := first[s0] + if x >= as { + // The following code simulates an additional check for x == xx and + // handling the ASCII and invalid cases accordingly. This mask-and-or + // approach prevents an additional branch. + mask := rune(x) << 31 >> 31 // Create 0x0000 or 0xFFFF. 
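+		// only xx (0xF1) has its low bit set, so the shift above yields an all-ones
+		// mask that forces this expression to utf8.RuneError; for as (0xF0) the mask
+		// is zero and the single ASCII byte passes through as valid UTF-8.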
+ if rune(s[0])&^mask|utf8.RuneError&mask == utf8.RuneError { + return runeErrorState, 1 + } + return validUTF8State, 1 + } + sz := int(x & 7) + if n < sz { + return runeErrorState, 1 + } + s1 := s[1] + switch x >> 4 { + case 0: + if s1 < locb || hicb < s1 { + return runeErrorState, 1 + } + case 1: + if s1 < 0xA0 || hicb < s1 { + return runeErrorState, 1 + } + case 2: + if s1 < locb || 0x9F < s1 { + return runeErrorState, 1 + } + case 3: + if s1 < 0x90 || hicb < s1 { + return runeErrorState, 1 + } + case 4: + if s1 < locb || 0x8F < s1 { + return runeErrorState, 1 + } + } + if sz <= 2 { + return validUTF8State, 2 + } + s2 := s[2] + if s2 < locb || hicb < s2 { + return runeErrorState, 1 + } + if sz <= 3 { + // separator character prefixes: [2]byte{226, 128} + if s0 == 226 && s1 == 128 { + switch s2 { + case lineSep: + return lineSepState, 3 + case paragraphSep: + return paragraphSepState, 3 + } + } + return validUTF8State, 3 + } + s3 := s[3] + if s3 < locb || hicb < s3 { + return runeErrorState, 1 + } + return validUTF8State, 4 +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/encoder.go b/vendor/github.com/goccy/go-json/internal/encoder/encoder.go new file mode 100644 index 0000000000000..14eb6a0d643b9 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/encoder.go @@ -0,0 +1,596 @@ +package encoder + +import ( + "bytes" + "encoding" + "encoding/base64" + "encoding/json" + "fmt" + "math" + "reflect" + "strconv" + "strings" + "sync" + "unsafe" + + "github.com/goccy/go-json/internal/errors" + "github.com/goccy/go-json/internal/runtime" +) + +func (t OpType) IsMultipleOpHead() bool { + switch t { + case OpStructHead: + return true + case OpStructHeadSlice: + return true + case OpStructHeadArray: + return true + case OpStructHeadMap: + return true + case OpStructHeadStruct: + return true + case OpStructHeadOmitEmpty: + return true + case OpStructHeadOmitEmptySlice: + return true + case OpStructHeadOmitEmptyArray: + return true + case OpStructHeadOmitEmptyMap: + return true + case OpStructHeadOmitEmptyStruct: + return true + case OpStructHeadSlicePtr: + return true + case OpStructHeadOmitEmptySlicePtr: + return true + case OpStructHeadArrayPtr: + return true + case OpStructHeadOmitEmptyArrayPtr: + return true + case OpStructHeadMapPtr: + return true + case OpStructHeadOmitEmptyMapPtr: + return true + } + return false +} + +func (t OpType) IsMultipleOpField() bool { + switch t { + case OpStructField: + return true + case OpStructFieldSlice: + return true + case OpStructFieldArray: + return true + case OpStructFieldMap: + return true + case OpStructFieldStruct: + return true + case OpStructFieldOmitEmpty: + return true + case OpStructFieldOmitEmptySlice: + return true + case OpStructFieldOmitEmptyArray: + return true + case OpStructFieldOmitEmptyMap: + return true + case OpStructFieldOmitEmptyStruct: + return true + case OpStructFieldSlicePtr: + return true + case OpStructFieldOmitEmptySlicePtr: + return true + case OpStructFieldArrayPtr: + return true + case OpStructFieldOmitEmptyArrayPtr: + return true + case OpStructFieldMapPtr: + return true + case OpStructFieldOmitEmptyMapPtr: + return true + } + return false +} + +type OpcodeSet struct { + Type *runtime.Type + NoescapeKeyCode *Opcode + EscapeKeyCode *Opcode + InterfaceNoescapeKeyCode *Opcode + InterfaceEscapeKeyCode *Opcode + CodeLength int + EndCode *Opcode + Code Code + QueryCache map[string]*OpcodeSet + cacheMu sync.RWMutex +} + +func (s *OpcodeSet) getQueryCache(hash string) *OpcodeSet { + s.cacheMu.RLock() 
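+	// Reads of QueryCache are guarded by cacheMu's read lock; setQueryCache below takes the write lock.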
+ codeSet := s.QueryCache[hash] + s.cacheMu.RUnlock() + return codeSet +} + +func (s *OpcodeSet) setQueryCache(hash string, codeSet *OpcodeSet) { + s.cacheMu.Lock() + s.QueryCache[hash] = codeSet + s.cacheMu.Unlock() +} + +type CompiledCode struct { + Code *Opcode + Linked bool // whether recursive code already have linked + CurLen uintptr + NextLen uintptr +} + +const StartDetectingCyclesAfter = 1000 + +func Load(base uintptr, idx uintptr) uintptr { + addr := base + idx + return **(**uintptr)(unsafe.Pointer(&addr)) +} + +func Store(base uintptr, idx uintptr, p uintptr) { + addr := base + idx + **(**uintptr)(unsafe.Pointer(&addr)) = p +} + +func LoadNPtr(base uintptr, idx uintptr, ptrNum int) uintptr { + addr := base + idx + p := **(**uintptr)(unsafe.Pointer(&addr)) + if p == 0 { + return 0 + } + return PtrToPtr(p) + /* + for i := 0; i < ptrNum; i++ { + if p == 0 { + return p + } + p = PtrToPtr(p) + } + return p + */ +} + +func PtrToUint64(p uintptr) uint64 { return **(**uint64)(unsafe.Pointer(&p)) } +func PtrToFloat32(p uintptr) float32 { return **(**float32)(unsafe.Pointer(&p)) } +func PtrToFloat64(p uintptr) float64 { return **(**float64)(unsafe.Pointer(&p)) } +func PtrToBool(p uintptr) bool { return **(**bool)(unsafe.Pointer(&p)) } +func PtrToBytes(p uintptr) []byte { return **(**[]byte)(unsafe.Pointer(&p)) } +func PtrToNumber(p uintptr) json.Number { return **(**json.Number)(unsafe.Pointer(&p)) } +func PtrToString(p uintptr) string { return **(**string)(unsafe.Pointer(&p)) } +func PtrToSlice(p uintptr) *runtime.SliceHeader { return *(**runtime.SliceHeader)(unsafe.Pointer(&p)) } +func PtrToPtr(p uintptr) uintptr { + return uintptr(**(**unsafe.Pointer)(unsafe.Pointer(&p))) +} +func PtrToNPtr(p uintptr, ptrNum int) uintptr { + for i := 0; i < ptrNum; i++ { + if p == 0 { + return 0 + } + p = PtrToPtr(p) + } + return p +} + +func PtrToUnsafePtr(p uintptr) unsafe.Pointer { + return *(*unsafe.Pointer)(unsafe.Pointer(&p)) +} +func PtrToInterface(code *Opcode, p uintptr) interface{} { + return *(*interface{})(unsafe.Pointer(&emptyInterface{ + typ: code.Type, + ptr: *(*unsafe.Pointer)(unsafe.Pointer(&p)), + })) +} + +func ErrUnsupportedValue(code *Opcode, ptr uintptr) *errors.UnsupportedValueError { + v := *(*interface{})(unsafe.Pointer(&emptyInterface{ + typ: code.Type, + ptr: *(*unsafe.Pointer)(unsafe.Pointer(&ptr)), + })) + return &errors.UnsupportedValueError{ + Value: reflect.ValueOf(v), + Str: fmt.Sprintf("encountered a cycle via %s", code.Type), + } +} + +func ErrUnsupportedFloat(v float64) *errors.UnsupportedValueError { + return &errors.UnsupportedValueError{ + Value: reflect.ValueOf(v), + Str: strconv.FormatFloat(v, 'g', -1, 64), + } +} + +func ErrMarshalerWithCode(code *Opcode, err error) *errors.MarshalerError { + return &errors.MarshalerError{ + Type: runtime.RType2Type(code.Type), + Err: err, + } +} + +type emptyInterface struct { + typ *runtime.Type + ptr unsafe.Pointer +} + +type MapItem struct { + Key []byte + Value []byte +} + +type Mapslice struct { + Items []MapItem +} + +func (m *Mapslice) Len() int { + return len(m.Items) +} + +func (m *Mapslice) Less(i, j int) bool { + return bytes.Compare(m.Items[i].Key, m.Items[j].Key) < 0 +} + +func (m *Mapslice) Swap(i, j int) { + m.Items[i], m.Items[j] = m.Items[j], m.Items[i] +} + +//nolint:structcheck,unused +type mapIter struct { + key unsafe.Pointer + elem unsafe.Pointer + t unsafe.Pointer + h unsafe.Pointer + buckets unsafe.Pointer + bptr unsafe.Pointer + overflow unsafe.Pointer + oldoverflow unsafe.Pointer + startBucket uintptr 
+ offset uint8 + wrapped bool + B uint8 + i uint8 + bucket uintptr + checkBucket uintptr +} + +type MapContext struct { + Start int + First int + Idx int + Slice *Mapslice + Buf []byte + Len int + Iter mapIter +} + +var mapContextPool = sync.Pool{ + New: func() interface{} { + return &MapContext{ + Slice: &Mapslice{}, + } + }, +} + +func NewMapContext(mapLen int, unorderedMap bool) *MapContext { + ctx := mapContextPool.Get().(*MapContext) + if !unorderedMap { + if len(ctx.Slice.Items) < mapLen { + ctx.Slice.Items = make([]MapItem, mapLen) + } else { + ctx.Slice.Items = ctx.Slice.Items[:mapLen] + } + } + ctx.Buf = ctx.Buf[:0] + ctx.Iter = mapIter{} + ctx.Idx = 0 + ctx.Len = mapLen + return ctx +} + +func ReleaseMapContext(c *MapContext) { + mapContextPool.Put(c) +} + +//go:linkname MapIterInit runtime.mapiterinit +//go:noescape +func MapIterInit(mapType *runtime.Type, m unsafe.Pointer, it *mapIter) + +//go:linkname MapIterKey reflect.mapiterkey +//go:noescape +func MapIterKey(it *mapIter) unsafe.Pointer + +//go:linkname MapIterNext reflect.mapiternext +//go:noescape +func MapIterNext(it *mapIter) + +//go:linkname MapLen reflect.maplen +//go:noescape +func MapLen(m unsafe.Pointer) int + +func AppendByteSlice(_ *RuntimeContext, b []byte, src []byte) []byte { + if src == nil { + return append(b, `null`...) + } + encodedLen := base64.StdEncoding.EncodedLen(len(src)) + b = append(b, '"') + pos := len(b) + remainLen := cap(b[pos:]) + var buf []byte + if remainLen > encodedLen { + buf = b[pos : pos+encodedLen] + } else { + buf = make([]byte, encodedLen) + } + base64.StdEncoding.Encode(buf, src) + return append(append(b, buf...), '"') +} + +func AppendFloat32(_ *RuntimeContext, b []byte, v float32) []byte { + f64 := float64(v) + abs := math.Abs(f64) + fmt := byte('f') + // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right. + if abs != 0 { + f32 := float32(abs) + if f32 < 1e-6 || f32 >= 1e21 { + fmt = 'e' + } + } + return strconv.AppendFloat(b, f64, fmt, -1, 32) +} + +func AppendFloat64(_ *RuntimeContext, b []byte, v float64) []byte { + abs := math.Abs(v) + fmt := byte('f') + // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right. + if abs != 0 { + if abs < 1e-6 || abs >= 1e21 { + fmt = 'e' + } + } + return strconv.AppendFloat(b, v, fmt, -1, 64) +} + +func AppendBool(_ *RuntimeContext, b []byte, v bool) []byte { + if v { + return append(b, "true"...) + } + return append(b, "false"...) +} + +var ( + floatTable = [256]bool{ + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + '.': true, + 'e': true, + 'E': true, + '+': true, + '-': true, + } +) + +func AppendNumber(_ *RuntimeContext, b []byte, n json.Number) ([]byte, error) { + if len(n) == 0 { + return append(b, '0'), nil + } + for i := 0; i < len(n); i++ { + if !floatTable[n[i]] { + return nil, fmt.Errorf("json: invalid number literal %q", n) + } + } + b = append(b, n...) 
+ return b, nil +} + +func AppendMarshalJSON(ctx *RuntimeContext, code *Opcode, b []byte, v interface{}) ([]byte, error) { + rv := reflect.ValueOf(v) // convert by dynamic interface type + if (code.Flags & AddrForMarshalerFlags) != 0 { + if rv.CanAddr() { + rv = rv.Addr() + } else { + newV := reflect.New(rv.Type()) + newV.Elem().Set(rv) + rv = newV + } + } + v = rv.Interface() + var bb []byte + if (code.Flags & MarshalerContextFlags) != 0 { + marshaler, ok := v.(marshalerContext) + if !ok { + return AppendNull(ctx, b), nil + } + stdctx := ctx.Option.Context + if ctx.Option.Flag&FieldQueryOption != 0 { + stdctx = SetFieldQueryToContext(stdctx, code.FieldQuery) + } + b, err := marshaler.MarshalJSON(stdctx) + if err != nil { + return nil, &errors.MarshalerError{Type: reflect.TypeOf(v), Err: err} + } + bb = b + } else { + marshaler, ok := v.(json.Marshaler) + if !ok { + return AppendNull(ctx, b), nil + } + b, err := marshaler.MarshalJSON() + if err != nil { + return nil, &errors.MarshalerError{Type: reflect.TypeOf(v), Err: err} + } + bb = b + } + marshalBuf := ctx.MarshalBuf[:0] + marshalBuf = append(append(marshalBuf, bb...), nul) + compactedBuf, err := compact(b, marshalBuf, (ctx.Option.Flag&HTMLEscapeOption) != 0) + if err != nil { + return nil, &errors.MarshalerError{Type: reflect.TypeOf(v), Err: err} + } + ctx.MarshalBuf = marshalBuf + return compactedBuf, nil +} + +func AppendMarshalJSONIndent(ctx *RuntimeContext, code *Opcode, b []byte, v interface{}) ([]byte, error) { + rv := reflect.ValueOf(v) // convert by dynamic interface type + if (code.Flags & AddrForMarshalerFlags) != 0 { + if rv.CanAddr() { + rv = rv.Addr() + } else { + newV := reflect.New(rv.Type()) + newV.Elem().Set(rv) + rv = newV + } + } + v = rv.Interface() + var bb []byte + if (code.Flags & MarshalerContextFlags) != 0 { + marshaler, ok := v.(marshalerContext) + if !ok { + return AppendNull(ctx, b), nil + } + b, err := marshaler.MarshalJSON(ctx.Option.Context) + if err != nil { + return nil, &errors.MarshalerError{Type: reflect.TypeOf(v), Err: err} + } + bb = b + } else { + marshaler, ok := v.(json.Marshaler) + if !ok { + return AppendNull(ctx, b), nil + } + b, err := marshaler.MarshalJSON() + if err != nil { + return nil, &errors.MarshalerError{Type: reflect.TypeOf(v), Err: err} + } + bb = b + } + marshalBuf := ctx.MarshalBuf[:0] + marshalBuf = append(append(marshalBuf, bb...), nul) + indentedBuf, err := doIndent( + b, + marshalBuf, + string(ctx.Prefix)+strings.Repeat(string(ctx.IndentStr), int(ctx.BaseIndent+code.Indent)), + string(ctx.IndentStr), + (ctx.Option.Flag&HTMLEscapeOption) != 0, + ) + if err != nil { + return nil, &errors.MarshalerError{Type: reflect.TypeOf(v), Err: err} + } + ctx.MarshalBuf = marshalBuf + return indentedBuf, nil +} + +func AppendMarshalText(ctx *RuntimeContext, code *Opcode, b []byte, v interface{}) ([]byte, error) { + rv := reflect.ValueOf(v) // convert by dynamic interface type + if (code.Flags & AddrForMarshalerFlags) != 0 { + if rv.CanAddr() { + rv = rv.Addr() + } else { + newV := reflect.New(rv.Type()) + newV.Elem().Set(rv) + rv = newV + } + } + v = rv.Interface() + marshaler, ok := v.(encoding.TextMarshaler) + if !ok { + return AppendNull(ctx, b), nil + } + bytes, err := marshaler.MarshalText() + if err != nil { + return nil, &errors.MarshalerError{Type: reflect.TypeOf(v), Err: err} + } + return AppendString(ctx, b, *(*string)(unsafe.Pointer(&bytes))), nil +} + +func AppendMarshalTextIndent(ctx *RuntimeContext, code *Opcode, b []byte, v interface{}) ([]byte, error) { + rv := 
reflect.ValueOf(v) // convert by dynamic interface type + if (code.Flags & AddrForMarshalerFlags) != 0 { + if rv.CanAddr() { + rv = rv.Addr() + } else { + newV := reflect.New(rv.Type()) + newV.Elem().Set(rv) + rv = newV + } + } + v = rv.Interface() + marshaler, ok := v.(encoding.TextMarshaler) + if !ok { + return AppendNull(ctx, b), nil + } + bytes, err := marshaler.MarshalText() + if err != nil { + return nil, &errors.MarshalerError{Type: reflect.TypeOf(v), Err: err} + } + return AppendString(ctx, b, *(*string)(unsafe.Pointer(&bytes))), nil +} + +func AppendNull(_ *RuntimeContext, b []byte) []byte { + return append(b, "null"...) +} + +func AppendComma(_ *RuntimeContext, b []byte) []byte { + return append(b, ',') +} + +func AppendCommaIndent(_ *RuntimeContext, b []byte) []byte { + return append(b, ',', '\n') +} + +func AppendStructEnd(_ *RuntimeContext, b []byte) []byte { + return append(b, '}', ',') +} + +func AppendStructEndIndent(ctx *RuntimeContext, code *Opcode, b []byte) []byte { + b = append(b, '\n') + b = append(b, ctx.Prefix...) + indentNum := ctx.BaseIndent + code.Indent - 1 + for i := uint32(0); i < indentNum; i++ { + b = append(b, ctx.IndentStr...) + } + return append(b, '}', ',', '\n') +} + +func AppendIndent(ctx *RuntimeContext, b []byte, indent uint32) []byte { + b = append(b, ctx.Prefix...) + indentNum := ctx.BaseIndent + indent + for i := uint32(0); i < indentNum; i++ { + b = append(b, ctx.IndentStr...) + } + return b +} + +func IsNilForMarshaler(v interface{}) bool { + rv := reflect.ValueOf(v) + switch rv.Kind() { + case reflect.Bool: + return !rv.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return rv.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return rv.Uint() == 0 + case reflect.Float32, reflect.Float64: + return math.Float64bits(rv.Float()) == 0 + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Func: + return rv.IsNil() + case reflect.Slice: + return rv.IsNil() || rv.Len() == 0 + case reflect.String: + return rv.Len() == 0 + } + return false +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/indent.go b/vendor/github.com/goccy/go-json/internal/encoder/indent.go new file mode 100644 index 0000000000000..dfe04b5e3c43f --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/indent.go @@ -0,0 +1,211 @@ +package encoder + +import ( + "bytes" + "fmt" + + "github.com/goccy/go-json/internal/errors" +) + +func takeIndentSrcRuntimeContext(src []byte) (*RuntimeContext, []byte) { + ctx := TakeRuntimeContext() + buf := ctx.Buf[:0] + buf = append(append(buf, src...), nul) + ctx.Buf = buf + return ctx, buf +} + +func Indent(buf *bytes.Buffer, src []byte, prefix, indentStr string) error { + if len(src) == 0 { + return errors.ErrUnexpectedEndOfJSON("", 0) + } + + srcCtx, srcBuf := takeIndentSrcRuntimeContext(src) + dstCtx := TakeRuntimeContext() + dst := dstCtx.Buf[:0] + + dst, err := indentAndWrite(buf, dst, srcBuf, prefix, indentStr) + if err != nil { + ReleaseRuntimeContext(srcCtx) + ReleaseRuntimeContext(dstCtx) + return err + } + dstCtx.Buf = dst + ReleaseRuntimeContext(srcCtx) + ReleaseRuntimeContext(dstCtx) + return nil +} + +func indentAndWrite(buf *bytes.Buffer, dst []byte, src []byte, prefix, indentStr string) ([]byte, error) { + dst, err := doIndent(dst, src, prefix, indentStr, false) + if err != nil { + return nil, err + } + if _, err := buf.Write(dst); err != nil { + return nil, err + } + return dst, nil +} + +func 
doIndent(dst, src []byte, prefix, indentStr string, escape bool) ([]byte, error) { + buf, cursor, err := indentValue(dst, src, 0, 0, []byte(prefix), []byte(indentStr), escape) + if err != nil { + return nil, err + } + if err := validateEndBuf(src, cursor); err != nil { + return nil, err + } + return buf, nil +} + +func indentValue( + dst []byte, + src []byte, + indentNum int, + cursor int64, + prefix []byte, + indentBytes []byte, + escape bool) ([]byte, int64, error) { + for { + switch src[cursor] { + case ' ', '\t', '\n', '\r': + cursor++ + continue + case '{': + return indentObject(dst, src, indentNum, cursor, prefix, indentBytes, escape) + case '}': + return nil, 0, errors.ErrSyntax("unexpected character '}'", cursor) + case '[': + return indentArray(dst, src, indentNum, cursor, prefix, indentBytes, escape) + case ']': + return nil, 0, errors.ErrSyntax("unexpected character ']'", cursor) + case '"': + return compactString(dst, src, cursor, escape) + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return compactNumber(dst, src, cursor) + case 't': + return compactTrue(dst, src, cursor) + case 'f': + return compactFalse(dst, src, cursor) + case 'n': + return compactNull(dst, src, cursor) + default: + return nil, 0, errors.ErrSyntax(fmt.Sprintf("unexpected character '%c'", src[cursor]), cursor) + } + } +} + +func indentObject( + dst []byte, + src []byte, + indentNum int, + cursor int64, + prefix []byte, + indentBytes []byte, + escape bool) ([]byte, int64, error) { + if src[cursor] == '{' { + dst = append(dst, '{') + } else { + return nil, 0, errors.ErrExpected("expected { character for object value", cursor) + } + cursor = skipWhiteSpace(src, cursor+1) + if src[cursor] == '}' { + dst = append(dst, '}') + return dst, cursor + 1, nil + } + indentNum++ + var err error + for { + dst = append(append(dst, '\n'), prefix...) + for i := 0; i < indentNum; i++ { + dst = append(dst, indentBytes...) + } + cursor = skipWhiteSpace(src, cursor) + dst, cursor, err = compactString(dst, src, cursor, escape) + if err != nil { + return nil, 0, err + } + cursor = skipWhiteSpace(src, cursor) + if src[cursor] != ':' { + return nil, 0, errors.ErrSyntax( + fmt.Sprintf("invalid character '%c' after object key", src[cursor]), + cursor+1, + ) + } + dst = append(dst, ':', ' ') + dst, cursor, err = indentValue(dst, src, indentNum, cursor+1, prefix, indentBytes, escape) + if err != nil { + return nil, 0, err + } + cursor = skipWhiteSpace(src, cursor) + switch src[cursor] { + case '}': + dst = append(append(dst, '\n'), prefix...) + for i := 0; i < indentNum-1; i++ { + dst = append(dst, indentBytes...) + } + dst = append(dst, '}') + cursor++ + return dst, cursor, nil + case ',': + dst = append(dst, ',') + default: + return nil, 0, errors.ErrSyntax( + fmt.Sprintf("invalid character '%c' after object key:value pair", src[cursor]), + cursor+1, + ) + } + cursor++ + } +} + +func indentArray( + dst []byte, + src []byte, + indentNum int, + cursor int64, + prefix []byte, + indentBytes []byte, + escape bool) ([]byte, int64, error) { + if src[cursor] == '[' { + dst = append(dst, '[') + } else { + return nil, 0, errors.ErrExpected("expected [ character for array value", cursor) + } + cursor = skipWhiteSpace(src, cursor+1) + if src[cursor] == ']' { + dst = append(dst, ']') + return dst, cursor + 1, nil + } + indentNum++ + var err error + for { + dst = append(append(dst, '\n'), prefix...) + for i := 0; i < indentNum; i++ { + dst = append(dst, indentBytes...) 
+ } + dst, cursor, err = indentValue(dst, src, indentNum, cursor, prefix, indentBytes, escape) + if err != nil { + return nil, 0, err + } + cursor = skipWhiteSpace(src, cursor) + switch src[cursor] { + case ']': + dst = append(append(dst, '\n'), prefix...) + for i := 0; i < indentNum-1; i++ { + dst = append(dst, indentBytes...) + } + dst = append(dst, ']') + cursor++ + return dst, cursor, nil + case ',': + dst = append(dst, ',') + default: + return nil, 0, errors.ErrSyntax( + fmt.Sprintf("invalid character '%c' after array value", src[cursor]), + cursor+1, + ) + } + cursor++ + } +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/int.go b/vendor/github.com/goccy/go-json/internal/encoder/int.go new file mode 100644 index 0000000000000..8b5febeaa63b9 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/int.go @@ -0,0 +1,176 @@ +// This files's processing codes are inspired by https://github.com/segmentio/encoding. +// The license notation is as follows. +// +// # MIT License +// +// Copyright (c) 2019 Segment.io, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. 
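+
+// Note: the functions in this file emit integers two decimal digits at a time
+// using a 100-entry uint16 lookup table in host byte order (see init below),
+// which halves the number of divisions versus emitting one digit at a time.
+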
+package encoder + +import ( + "unsafe" +) + +var endianness int + +func init() { + var b [2]byte + *(*uint16)(unsafe.Pointer(&b)) = uint16(0xABCD) + + switch b[0] { + case 0xCD: + endianness = 0 // LE + case 0xAB: + endianness = 1 // BE + default: + panic("could not determine endianness") + } +} + +// "00010203...96979899" cast to []uint16 +var intLELookup = [100]uint16{ + 0x3030, 0x3130, 0x3230, 0x3330, 0x3430, 0x3530, 0x3630, 0x3730, 0x3830, 0x3930, + 0x3031, 0x3131, 0x3231, 0x3331, 0x3431, 0x3531, 0x3631, 0x3731, 0x3831, 0x3931, + 0x3032, 0x3132, 0x3232, 0x3332, 0x3432, 0x3532, 0x3632, 0x3732, 0x3832, 0x3932, + 0x3033, 0x3133, 0x3233, 0x3333, 0x3433, 0x3533, 0x3633, 0x3733, 0x3833, 0x3933, + 0x3034, 0x3134, 0x3234, 0x3334, 0x3434, 0x3534, 0x3634, 0x3734, 0x3834, 0x3934, + 0x3035, 0x3135, 0x3235, 0x3335, 0x3435, 0x3535, 0x3635, 0x3735, 0x3835, 0x3935, + 0x3036, 0x3136, 0x3236, 0x3336, 0x3436, 0x3536, 0x3636, 0x3736, 0x3836, 0x3936, + 0x3037, 0x3137, 0x3237, 0x3337, 0x3437, 0x3537, 0x3637, 0x3737, 0x3837, 0x3937, + 0x3038, 0x3138, 0x3238, 0x3338, 0x3438, 0x3538, 0x3638, 0x3738, 0x3838, 0x3938, + 0x3039, 0x3139, 0x3239, 0x3339, 0x3439, 0x3539, 0x3639, 0x3739, 0x3839, 0x3939, +} + +var intBELookup = [100]uint16{ + 0x3030, 0x3031, 0x3032, 0x3033, 0x3034, 0x3035, 0x3036, 0x3037, 0x3038, 0x3039, + 0x3130, 0x3131, 0x3132, 0x3133, 0x3134, 0x3135, 0x3136, 0x3137, 0x3138, 0x3139, + 0x3230, 0x3231, 0x3232, 0x3233, 0x3234, 0x3235, 0x3236, 0x3237, 0x3238, 0x3239, + 0x3330, 0x3331, 0x3332, 0x3333, 0x3334, 0x3335, 0x3336, 0x3337, 0x3338, 0x3339, + 0x3430, 0x3431, 0x3432, 0x3433, 0x3434, 0x3435, 0x3436, 0x3437, 0x3438, 0x3439, + 0x3530, 0x3531, 0x3532, 0x3533, 0x3534, 0x3535, 0x3536, 0x3537, 0x3538, 0x3539, + 0x3630, 0x3631, 0x3632, 0x3633, 0x3634, 0x3635, 0x3636, 0x3637, 0x3638, 0x3639, + 0x3730, 0x3731, 0x3732, 0x3733, 0x3734, 0x3735, 0x3736, 0x3737, 0x3738, 0x3739, + 0x3830, 0x3831, 0x3832, 0x3833, 0x3834, 0x3835, 0x3836, 0x3837, 0x3838, 0x3839, + 0x3930, 0x3931, 0x3932, 0x3933, 0x3934, 0x3935, 0x3936, 0x3937, 0x3938, 0x3939, +} + +var intLookup = [2]*[100]uint16{&intLELookup, &intBELookup} + +func numMask(numBitSize uint8) uint64 { + return 1<>(code.NumBitSize-1))&1 == 1 + if !negative { + if n < 10 { + return append(out, byte(n+'0')) + } else if n < 100 { + u := intLELookup[n] + return append(out, byte(u), byte(u>>8)) + } + } else { + n = -n & mask + } + + lookup := intLookup[endianness] + + var b [22]byte + u := (*[11]uint16)(unsafe.Pointer(&b)) + i := 11 + + for n >= 100 { + j := n % 100 + n /= 100 + i-- + u[i] = lookup[j] + } + + i-- + u[i] = lookup[n] + + i *= 2 // convert to byte index + if n < 10 { + i++ // remove leading zero + } + if negative { + i-- + b[i] = '-' + } + + return append(out, b[i:]...) 
+} + +func AppendUint(_ *RuntimeContext, out []byte, p uintptr, code *Opcode) []byte { + var u64 uint64 + switch code.NumBitSize { + case 8: + u64 = (uint64)(**(**uint8)(unsafe.Pointer(&p))) + case 16: + u64 = (uint64)(**(**uint16)(unsafe.Pointer(&p))) + case 32: + u64 = (uint64)(**(**uint32)(unsafe.Pointer(&p))) + case 64: + u64 = **(**uint64)(unsafe.Pointer(&p)) + } + mask := numMask(code.NumBitSize) + n := u64 & mask + if n < 10 { + return append(out, byte(n+'0')) + } else if n < 100 { + u := intLELookup[n] + return append(out, byte(u), byte(u>>8)) + } + + lookup := intLookup[endianness] + + var b [22]byte + u := (*[11]uint16)(unsafe.Pointer(&b)) + i := 11 + + for n >= 100 { + j := n % 100 + n /= 100 + i-- + u[i] = lookup[j] + } + + i-- + u[i] = lookup[n] + + i *= 2 // convert to byte index + if n < 10 { + i++ // remove leading zero + } + return append(out, b[i:]...) +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/map112.go b/vendor/github.com/goccy/go-json/internal/encoder/map112.go new file mode 100644 index 0000000000000..e96ffadf7abf0 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/map112.go @@ -0,0 +1,9 @@ +//go:build !go1.13 +// +build !go1.13 + +package encoder + +import "unsafe" + +//go:linkname MapIterValue reflect.mapitervalue +func MapIterValue(it *mapIter) unsafe.Pointer diff --git a/vendor/github.com/goccy/go-json/internal/encoder/map113.go b/vendor/github.com/goccy/go-json/internal/encoder/map113.go new file mode 100644 index 0000000000000..9b69dcc360dcc --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/map113.go @@ -0,0 +1,9 @@ +//go:build go1.13 +// +build go1.13 + +package encoder + +import "unsafe" + +//go:linkname MapIterValue reflect.mapiterelem +func MapIterValue(it *mapIter) unsafe.Pointer diff --git a/vendor/github.com/goccy/go-json/internal/encoder/opcode.go b/vendor/github.com/goccy/go-json/internal/encoder/opcode.go new file mode 100644 index 0000000000000..df22f55423d02 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/opcode.go @@ -0,0 +1,752 @@ +package encoder + +import ( + "bytes" + "fmt" + "sort" + "strings" + "unsafe" + + "github.com/goccy/go-json/internal/runtime" +) + +const uintptrSize = 4 << (^uintptr(0) >> 63) + +type OpFlags uint16 + +const ( + AnonymousHeadFlags OpFlags = 1 << 0 + AnonymousKeyFlags OpFlags = 1 << 1 + IndirectFlags OpFlags = 1 << 2 + IsTaggedKeyFlags OpFlags = 1 << 3 + NilCheckFlags OpFlags = 1 << 4 + AddrForMarshalerFlags OpFlags = 1 << 5 + IsNextOpPtrTypeFlags OpFlags = 1 << 6 + IsNilableTypeFlags OpFlags = 1 << 7 + MarshalerContextFlags OpFlags = 1 << 8 + NonEmptyInterfaceFlags OpFlags = 1 << 9 +) + +type Opcode struct { + Op OpType // operation type + Idx uint32 // offset to access ptr + Next *Opcode // next opcode + End *Opcode // array/slice/struct/map end + NextField *Opcode // next struct field + Key string // struct field key + Offset uint32 // offset size from struct header + PtrNum uint8 // pointer number: e.g. double pointer is 2. 
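+	// NumBitSize is the bit width of the numeric value (8, 16, 32 or 64);
+	// AppendInt and AppendUint switch on it to load a value of the right size.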
+ NumBitSize uint8 + Flags OpFlags + + Type *runtime.Type // go type + Jmp *CompiledCode // for recursive call + FieldQuery *FieldQuery // field query for Interface / MarshalJSON / MarshalText + ElemIdx uint32 // offset to access array/slice elem + Length uint32 // offset to access slice length or array length + Indent uint32 // indent number + Size uint32 // array/slice elem size + DisplayIdx uint32 // opcode index + DisplayKey string // key text to display +} + +func (c *Opcode) Validate() error { + var prevIdx uint32 + for code := c; !code.IsEnd(); { + if prevIdx != 0 { + if code.DisplayIdx != prevIdx+1 { + return fmt.Errorf( + "invalid index. previous display index is %d but next is %d. dump = %s", + prevIdx, code.DisplayIdx, c.Dump(), + ) + } + } + prevIdx = code.DisplayIdx + code = code.IterNext() + } + return nil +} + +func (c *Opcode) IterNext() *Opcode { + if c == nil { + return nil + } + switch c.Op.CodeType() { + case CodeArrayElem, CodeSliceElem, CodeMapKey: + return c.End + default: + return c.Next + } +} + +func (c *Opcode) IsEnd() bool { + if c == nil { + return true + } + return c.Op == OpEnd || c.Op == OpInterfaceEnd || c.Op == OpRecursiveEnd +} + +func (c *Opcode) MaxIdx() uint32 { + max := uint32(0) + for _, value := range []uint32{ + c.Idx, + c.ElemIdx, + c.Length, + c.Size, + } { + if max < value { + max = value + } + } + return max +} + +func (c *Opcode) ToHeaderType(isString bool) OpType { + switch c.Op { + case OpInt: + if isString { + return OpStructHeadIntString + } + return OpStructHeadInt + case OpIntPtr: + if isString { + return OpStructHeadIntPtrString + } + return OpStructHeadIntPtr + case OpUint: + if isString { + return OpStructHeadUintString + } + return OpStructHeadUint + case OpUintPtr: + if isString { + return OpStructHeadUintPtrString + } + return OpStructHeadUintPtr + case OpFloat32: + if isString { + return OpStructHeadFloat32String + } + return OpStructHeadFloat32 + case OpFloat32Ptr: + if isString { + return OpStructHeadFloat32PtrString + } + return OpStructHeadFloat32Ptr + case OpFloat64: + if isString { + return OpStructHeadFloat64String + } + return OpStructHeadFloat64 + case OpFloat64Ptr: + if isString { + return OpStructHeadFloat64PtrString + } + return OpStructHeadFloat64Ptr + case OpString: + if isString { + return OpStructHeadStringString + } + return OpStructHeadString + case OpStringPtr: + if isString { + return OpStructHeadStringPtrString + } + return OpStructHeadStringPtr + case OpNumber: + if isString { + return OpStructHeadNumberString + } + return OpStructHeadNumber + case OpNumberPtr: + if isString { + return OpStructHeadNumberPtrString + } + return OpStructHeadNumberPtr + case OpBool: + if isString { + return OpStructHeadBoolString + } + return OpStructHeadBool + case OpBoolPtr: + if isString { + return OpStructHeadBoolPtrString + } + return OpStructHeadBoolPtr + case OpBytes: + return OpStructHeadBytes + case OpBytesPtr: + return OpStructHeadBytesPtr + case OpMap: + return OpStructHeadMap + case OpMapPtr: + c.Op = OpMap + return OpStructHeadMapPtr + case OpArray: + return OpStructHeadArray + case OpArrayPtr: + c.Op = OpArray + return OpStructHeadArrayPtr + case OpSlice: + return OpStructHeadSlice + case OpSlicePtr: + c.Op = OpSlice + return OpStructHeadSlicePtr + case OpMarshalJSON: + return OpStructHeadMarshalJSON + case OpMarshalJSONPtr: + return OpStructHeadMarshalJSONPtr + case OpMarshalText: + return OpStructHeadMarshalText + case OpMarshalTextPtr: + return OpStructHeadMarshalTextPtr + } + return OpStructHead +} + +func (c 
*Opcode) ToFieldType(isString bool) OpType { + switch c.Op { + case OpInt: + if isString { + return OpStructFieldIntString + } + return OpStructFieldInt + case OpIntPtr: + if isString { + return OpStructFieldIntPtrString + } + return OpStructFieldIntPtr + case OpUint: + if isString { + return OpStructFieldUintString + } + return OpStructFieldUint + case OpUintPtr: + if isString { + return OpStructFieldUintPtrString + } + return OpStructFieldUintPtr + case OpFloat32: + if isString { + return OpStructFieldFloat32String + } + return OpStructFieldFloat32 + case OpFloat32Ptr: + if isString { + return OpStructFieldFloat32PtrString + } + return OpStructFieldFloat32Ptr + case OpFloat64: + if isString { + return OpStructFieldFloat64String + } + return OpStructFieldFloat64 + case OpFloat64Ptr: + if isString { + return OpStructFieldFloat64PtrString + } + return OpStructFieldFloat64Ptr + case OpString: + if isString { + return OpStructFieldStringString + } + return OpStructFieldString + case OpStringPtr: + if isString { + return OpStructFieldStringPtrString + } + return OpStructFieldStringPtr + case OpNumber: + if isString { + return OpStructFieldNumberString + } + return OpStructFieldNumber + case OpNumberPtr: + if isString { + return OpStructFieldNumberPtrString + } + return OpStructFieldNumberPtr + case OpBool: + if isString { + return OpStructFieldBoolString + } + return OpStructFieldBool + case OpBoolPtr: + if isString { + return OpStructFieldBoolPtrString + } + return OpStructFieldBoolPtr + case OpBytes: + return OpStructFieldBytes + case OpBytesPtr: + return OpStructFieldBytesPtr + case OpMap: + return OpStructFieldMap + case OpMapPtr: + c.Op = OpMap + return OpStructFieldMapPtr + case OpArray: + return OpStructFieldArray + case OpArrayPtr: + c.Op = OpArray + return OpStructFieldArrayPtr + case OpSlice: + return OpStructFieldSlice + case OpSlicePtr: + c.Op = OpSlice + return OpStructFieldSlicePtr + case OpMarshalJSON: + return OpStructFieldMarshalJSON + case OpMarshalJSONPtr: + return OpStructFieldMarshalJSONPtr + case OpMarshalText: + return OpStructFieldMarshalText + case OpMarshalTextPtr: + return OpStructFieldMarshalTextPtr + } + return OpStructField +} + +func newOpCode(ctx *compileContext, typ *runtime.Type, op OpType) *Opcode { + return newOpCodeWithNext(ctx, typ, op, newEndOp(ctx, typ)) +} + +func opcodeOffset(idx int) uint32 { + return uint32(idx) * uintptrSize +} + +func getCodeAddrByIdx(head *Opcode, idx uint32) *Opcode { + addr := uintptr(unsafe.Pointer(head)) + uintptr(idx)*unsafe.Sizeof(Opcode{}) + return *(**Opcode)(unsafe.Pointer(&addr)) +} + +func copyOpcode(code *Opcode) *Opcode { + codeNum := ToEndCode(code).DisplayIdx + 1 + codeSlice := make([]Opcode, codeNum) + head := (*Opcode)((*runtime.SliceHeader)(unsafe.Pointer(&codeSlice)).Data) + ptr := head + c := code + for { + *ptr = Opcode{ + Op: c.Op, + Key: c.Key, + PtrNum: c.PtrNum, + NumBitSize: c.NumBitSize, + Flags: c.Flags, + Idx: c.Idx, + Offset: c.Offset, + Type: c.Type, + FieldQuery: c.FieldQuery, + DisplayIdx: c.DisplayIdx, + DisplayKey: c.DisplayKey, + ElemIdx: c.ElemIdx, + Length: c.Length, + Size: c.Size, + Indent: c.Indent, + Jmp: c.Jmp, + } + if c.End != nil { + ptr.End = getCodeAddrByIdx(head, c.End.DisplayIdx) + } + if c.NextField != nil { + ptr.NextField = getCodeAddrByIdx(head, c.NextField.DisplayIdx) + } + if c.Next != nil { + ptr.Next = getCodeAddrByIdx(head, c.Next.DisplayIdx) + } + if c.IsEnd() { + break + } + ptr = getCodeAddrByIdx(head, c.DisplayIdx+1) + c = c.IterNext() + } + return head +} + +func 
setTotalLengthToInterfaceOp(code *Opcode) { + for c := code; !c.IsEnd(); { + if c.Op == OpInterface || c.Op == OpInterfacePtr { + c.Length = uint32(code.TotalLength()) + } + c = c.IterNext() + } +} + +func ToEndCode(code *Opcode) *Opcode { + c := code + for !c.IsEnd() { + c = c.IterNext() + } + return c +} + +func copyToInterfaceOpcode(code *Opcode) *Opcode { + copied := copyOpcode(code) + c := copied + c = ToEndCode(c) + c.Idx += uintptrSize + c.ElemIdx = c.Idx + uintptrSize + c.Length = c.Idx + 2*uintptrSize + c.Op = OpInterfaceEnd + return copied +} + +func newOpCodeWithNext(ctx *compileContext, typ *runtime.Type, op OpType, next *Opcode) *Opcode { + return &Opcode{ + Op: op, + Idx: opcodeOffset(ctx.ptrIndex), + Next: next, + Type: typ, + DisplayIdx: ctx.opcodeIndex, + Indent: ctx.indent, + } +} + +func newEndOp(ctx *compileContext, typ *runtime.Type) *Opcode { + return newOpCodeWithNext(ctx, typ, OpEnd, nil) +} + +func (c *Opcode) TotalLength() int { + var idx int + code := c + for !code.IsEnd() { + maxIdx := int(code.MaxIdx() / uintptrSize) + if idx < maxIdx { + idx = maxIdx + } + if code.Op == OpRecursiveEnd { + break + } + code = code.IterNext() + } + maxIdx := int(code.MaxIdx() / uintptrSize) + if idx < maxIdx { + idx = maxIdx + } + return idx + 1 +} + +func (c *Opcode) dumpHead(code *Opcode) string { + var length uint32 + if code.Op.CodeType() == CodeArrayHead { + length = code.Length + } else { + length = code.Length / uintptrSize + } + return fmt.Sprintf( + `[%03d]%s%s ([idx:%d][elemIdx:%d][length:%d])`, + code.DisplayIdx, + strings.Repeat("-", int(code.Indent)), + code.Op, + code.Idx/uintptrSize, + code.ElemIdx/uintptrSize, + length, + ) +} + +func (c *Opcode) dumpMapHead(code *Opcode) string { + return fmt.Sprintf( + `[%03d]%s%s ([idx:%d])`, + code.DisplayIdx, + strings.Repeat("-", int(code.Indent)), + code.Op, + code.Idx/uintptrSize, + ) +} + +func (c *Opcode) dumpMapEnd(code *Opcode) string { + return fmt.Sprintf( + `[%03d]%s%s ([idx:%d])`, + code.DisplayIdx, + strings.Repeat("-", int(code.Indent)), + code.Op, + code.Idx/uintptrSize, + ) +} + +func (c *Opcode) dumpElem(code *Opcode) string { + var length uint32 + if code.Op.CodeType() == CodeArrayElem { + length = code.Length + } else { + length = code.Length / uintptrSize + } + return fmt.Sprintf( + `[%03d]%s%s ([idx:%d][elemIdx:%d][length:%d][size:%d])`, + code.DisplayIdx, + strings.Repeat("-", int(code.Indent)), + code.Op, + code.Idx/uintptrSize, + code.ElemIdx/uintptrSize, + length, + code.Size, + ) +} + +func (c *Opcode) dumpField(code *Opcode) string { + return fmt.Sprintf( + `[%03d]%s%s ([idx:%d][key:%s][offset:%d])`, + code.DisplayIdx, + strings.Repeat("-", int(code.Indent)), + code.Op, + code.Idx/uintptrSize, + code.DisplayKey, + code.Offset, + ) +} + +func (c *Opcode) dumpKey(code *Opcode) string { + return fmt.Sprintf( + `[%03d]%s%s ([idx:%d])`, + code.DisplayIdx, + strings.Repeat("-", int(code.Indent)), + code.Op, + code.Idx/uintptrSize, + ) +} + +func (c *Opcode) dumpValue(code *Opcode) string { + return fmt.Sprintf( + `[%03d]%s%s ([idx:%d])`, + code.DisplayIdx, + strings.Repeat("-", int(code.Indent)), + code.Op, + code.Idx/uintptrSize, + ) +} + +func (c *Opcode) Dump() string { + codes := []string{} + for code := c; !code.IsEnd(); { + switch code.Op.CodeType() { + case CodeSliceHead: + codes = append(codes, c.dumpHead(code)) + code = code.Next + case CodeMapHead: + codes = append(codes, c.dumpMapHead(code)) + code = code.Next + case CodeArrayElem, CodeSliceElem: + codes = append(codes, c.dumpElem(code)) + code 
= code.End + case CodeMapKey: + codes = append(codes, c.dumpKey(code)) + code = code.End + case CodeMapValue: + codes = append(codes, c.dumpValue(code)) + code = code.Next + case CodeMapEnd: + codes = append(codes, c.dumpMapEnd(code)) + code = code.Next + case CodeStructField: + codes = append(codes, c.dumpField(code)) + code = code.Next + case CodeStructEnd: + codes = append(codes, c.dumpField(code)) + code = code.Next + default: + codes = append(codes, fmt.Sprintf( + "[%03d]%s%s ([idx:%d])", + code.DisplayIdx, + strings.Repeat("-", int(code.Indent)), + code.Op, + code.Idx/uintptrSize, + )) + code = code.Next + } + } + return strings.Join(codes, "\n") +} + +func (c *Opcode) DumpDOT() string { + type edge struct { + from, to *Opcode + label string + weight int + } + var edges []edge + + b := &bytes.Buffer{} + fmt.Fprintf(b, "digraph \"%p\" {\n", c.Type) + fmt.Fprintln(b, "mclimit=1.5;\nrankdir=TD;\nordering=out;\nnode[shape=box];") + for code := c; !code.IsEnd(); { + label := code.Op.String() + fmt.Fprintf(b, "\"%p\" [label=%q];\n", code, label) + if p := code.Next; p != nil { + edges = append(edges, edge{ + from: code, + to: p, + label: "Next", + weight: 10, + }) + } + if p := code.NextField; p != nil { + edges = append(edges, edge{ + from: code, + to: p, + label: "NextField", + weight: 2, + }) + } + if p := code.End; p != nil { + edges = append(edges, edge{ + from: code, + to: p, + label: "End", + weight: 1, + }) + } + if p := code.Jmp; p != nil { + edges = append(edges, edge{ + from: code, + to: p.Code, + label: "Jmp", + weight: 1, + }) + } + + switch code.Op.CodeType() { + case CodeSliceHead: + code = code.Next + case CodeMapHead: + code = code.Next + case CodeArrayElem, CodeSliceElem: + code = code.End + case CodeMapKey: + code = code.End + case CodeMapValue: + code = code.Next + case CodeMapEnd: + code = code.Next + case CodeStructField: + code = code.Next + case CodeStructEnd: + code = code.Next + default: + code = code.Next + } + if code.IsEnd() { + fmt.Fprintf(b, "\"%p\" [label=%q];\n", code, code.Op.String()) + } + } + sort.Slice(edges, func(i, j int) bool { + return edges[i].to.DisplayIdx < edges[j].to.DisplayIdx + }) + for _, e := range edges { + fmt.Fprintf(b, "\"%p\" -> \"%p\" [label=%q][weight=%d];\n", e.from, e.to, e.label, e.weight) + } + fmt.Fprint(b, "}") + return b.String() +} + +func newSliceHeaderCode(ctx *compileContext, typ *runtime.Type) *Opcode { + idx := opcodeOffset(ctx.ptrIndex) + ctx.incPtrIndex() + elemIdx := opcodeOffset(ctx.ptrIndex) + ctx.incPtrIndex() + length := opcodeOffset(ctx.ptrIndex) + return &Opcode{ + Op: OpSlice, + Type: typ, + Idx: idx, + DisplayIdx: ctx.opcodeIndex, + ElemIdx: elemIdx, + Length: length, + Indent: ctx.indent, + } +} + +func newSliceElemCode(ctx *compileContext, typ *runtime.Type, head *Opcode, size uintptr) *Opcode { + return &Opcode{ + Op: OpSliceElem, + Type: typ, + Idx: head.Idx, + DisplayIdx: ctx.opcodeIndex, + ElemIdx: head.ElemIdx, + Length: head.Length, + Indent: ctx.indent, + Size: uint32(size), + } +} + +func newArrayHeaderCode(ctx *compileContext, typ *runtime.Type, alen int) *Opcode { + idx := opcodeOffset(ctx.ptrIndex) + ctx.incPtrIndex() + elemIdx := opcodeOffset(ctx.ptrIndex) + return &Opcode{ + Op: OpArray, + Type: typ, + Idx: idx, + DisplayIdx: ctx.opcodeIndex, + ElemIdx: elemIdx, + Indent: ctx.indent, + Length: uint32(alen), + } +} + +func newArrayElemCode(ctx *compileContext, typ *runtime.Type, head *Opcode, length int, size uintptr) *Opcode { + return &Opcode{ + Op: OpArrayElem, + Type: typ, + Idx: head.Idx, + 
DisplayIdx: ctx.opcodeIndex, + ElemIdx: head.ElemIdx, + Length: uint32(length), + Indent: ctx.indent, + Size: uint32(size), + } +} + +func newMapHeaderCode(ctx *compileContext, typ *runtime.Type) *Opcode { + idx := opcodeOffset(ctx.ptrIndex) + ctx.incPtrIndex() + return &Opcode{ + Op: OpMap, + Type: typ, + Idx: idx, + DisplayIdx: ctx.opcodeIndex, + Indent: ctx.indent, + } +} + +func newMapKeyCode(ctx *compileContext, typ *runtime.Type, head *Opcode) *Opcode { + return &Opcode{ + Op: OpMapKey, + Type: typ, + Idx: head.Idx, + DisplayIdx: ctx.opcodeIndex, + Indent: ctx.indent, + } +} + +func newMapValueCode(ctx *compileContext, typ *runtime.Type, head *Opcode) *Opcode { + return &Opcode{ + Op: OpMapValue, + Type: typ, + Idx: head.Idx, + DisplayIdx: ctx.opcodeIndex, + Indent: ctx.indent, + } +} + +func newMapEndCode(ctx *compileContext, typ *runtime.Type, head *Opcode) *Opcode { + return &Opcode{ + Op: OpMapEnd, + Type: typ, + Idx: head.Idx, + DisplayIdx: ctx.opcodeIndex, + Indent: ctx.indent, + Next: newEndOp(ctx, typ), + } +} + +func newRecursiveCode(ctx *compileContext, typ *runtime.Type, jmp *CompiledCode) *Opcode { + return &Opcode{ + Op: OpRecursive, + Type: typ, + Idx: opcodeOffset(ctx.ptrIndex), + Next: newEndOp(ctx, typ), + DisplayIdx: ctx.opcodeIndex, + Indent: ctx.indent, + Jmp: jmp, + } +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/option.go b/vendor/github.com/goccy/go-json/internal/encoder/option.go new file mode 100644 index 0000000000000..12c58e46c01a9 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/option.go @@ -0,0 +1,48 @@ +package encoder + +import ( + "context" + "io" +) + +type OptionFlag uint8 + +const ( + HTMLEscapeOption OptionFlag = 1 << iota + IndentOption + UnorderedMapOption + DebugOption + ColorizeOption + ContextOption + NormalizeUTF8Option + FieldQueryOption +) + +type Option struct { + Flag OptionFlag + ColorScheme *ColorScheme + Context context.Context + DebugOut io.Writer + DebugDOTOut io.WriteCloser +} + +type EncodeFormat struct { + Header string + Footer string +} + +type EncodeFormatScheme struct { + Int EncodeFormat + Uint EncodeFormat + Float EncodeFormat + Bool EncodeFormat + String EncodeFormat + Binary EncodeFormat + ObjectKey EncodeFormat + Null EncodeFormat +} + +type ( + ColorScheme = EncodeFormatScheme + ColorFormat = EncodeFormat +) diff --git a/vendor/github.com/goccy/go-json/internal/encoder/optype.go b/vendor/github.com/goccy/go-json/internal/encoder/optype.go new file mode 100644 index 0000000000000..5c1241b47d00b --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/optype.go @@ -0,0 +1,932 @@ +// Code generated by internal/cmd/generator. DO NOT EDIT! 
+package encoder + +import ( + "strings" +) + +type CodeType int + +const ( + CodeOp CodeType = 0 + CodeArrayHead CodeType = 1 + CodeArrayElem CodeType = 2 + CodeSliceHead CodeType = 3 + CodeSliceElem CodeType = 4 + CodeMapHead CodeType = 5 + CodeMapKey CodeType = 6 + CodeMapValue CodeType = 7 + CodeMapEnd CodeType = 8 + CodeRecursive CodeType = 9 + CodeStructField CodeType = 10 + CodeStructEnd CodeType = 11 +) + +var opTypeStrings = [400]string{ + "End", + "Interface", + "Ptr", + "SliceElem", + "SliceEnd", + "ArrayElem", + "ArrayEnd", + "MapKey", + "MapValue", + "MapEnd", + "Recursive", + "RecursivePtr", + "RecursiveEnd", + "InterfaceEnd", + "Int", + "Uint", + "Float32", + "Float64", + "Bool", + "String", + "Bytes", + "Number", + "Array", + "Map", + "Slice", + "Struct", + "MarshalJSON", + "MarshalText", + "IntString", + "UintString", + "Float32String", + "Float64String", + "BoolString", + "StringString", + "NumberString", + "IntPtr", + "UintPtr", + "Float32Ptr", + "Float64Ptr", + "BoolPtr", + "StringPtr", + "BytesPtr", + "NumberPtr", + "ArrayPtr", + "MapPtr", + "SlicePtr", + "MarshalJSONPtr", + "MarshalTextPtr", + "InterfacePtr", + "IntPtrString", + "UintPtrString", + "Float32PtrString", + "Float64PtrString", + "BoolPtrString", + "StringPtrString", + "NumberPtrString", + "StructHeadInt", + "StructHeadOmitEmptyInt", + "StructPtrHeadInt", + "StructPtrHeadOmitEmptyInt", + "StructHeadUint", + "StructHeadOmitEmptyUint", + "StructPtrHeadUint", + "StructPtrHeadOmitEmptyUint", + "StructHeadFloat32", + "StructHeadOmitEmptyFloat32", + "StructPtrHeadFloat32", + "StructPtrHeadOmitEmptyFloat32", + "StructHeadFloat64", + "StructHeadOmitEmptyFloat64", + "StructPtrHeadFloat64", + "StructPtrHeadOmitEmptyFloat64", + "StructHeadBool", + "StructHeadOmitEmptyBool", + "StructPtrHeadBool", + "StructPtrHeadOmitEmptyBool", + "StructHeadString", + "StructHeadOmitEmptyString", + "StructPtrHeadString", + "StructPtrHeadOmitEmptyString", + "StructHeadBytes", + "StructHeadOmitEmptyBytes", + "StructPtrHeadBytes", + "StructPtrHeadOmitEmptyBytes", + "StructHeadNumber", + "StructHeadOmitEmptyNumber", + "StructPtrHeadNumber", + "StructPtrHeadOmitEmptyNumber", + "StructHeadArray", + "StructHeadOmitEmptyArray", + "StructPtrHeadArray", + "StructPtrHeadOmitEmptyArray", + "StructHeadMap", + "StructHeadOmitEmptyMap", + "StructPtrHeadMap", + "StructPtrHeadOmitEmptyMap", + "StructHeadSlice", + "StructHeadOmitEmptySlice", + "StructPtrHeadSlice", + "StructPtrHeadOmitEmptySlice", + "StructHeadStruct", + "StructHeadOmitEmptyStruct", + "StructPtrHeadStruct", + "StructPtrHeadOmitEmptyStruct", + "StructHeadMarshalJSON", + "StructHeadOmitEmptyMarshalJSON", + "StructPtrHeadMarshalJSON", + "StructPtrHeadOmitEmptyMarshalJSON", + "StructHeadMarshalText", + "StructHeadOmitEmptyMarshalText", + "StructPtrHeadMarshalText", + "StructPtrHeadOmitEmptyMarshalText", + "StructHeadIntString", + "StructHeadOmitEmptyIntString", + "StructPtrHeadIntString", + "StructPtrHeadOmitEmptyIntString", + "StructHeadUintString", + "StructHeadOmitEmptyUintString", + "StructPtrHeadUintString", + "StructPtrHeadOmitEmptyUintString", + "StructHeadFloat32String", + "StructHeadOmitEmptyFloat32String", + "StructPtrHeadFloat32String", + "StructPtrHeadOmitEmptyFloat32String", + "StructHeadFloat64String", + "StructHeadOmitEmptyFloat64String", + "StructPtrHeadFloat64String", + "StructPtrHeadOmitEmptyFloat64String", + "StructHeadBoolString", + "StructHeadOmitEmptyBoolString", + "StructPtrHeadBoolString", + "StructPtrHeadOmitEmptyBoolString", + "StructHeadStringString", + 
"StructHeadOmitEmptyStringString", + "StructPtrHeadStringString", + "StructPtrHeadOmitEmptyStringString", + "StructHeadNumberString", + "StructHeadOmitEmptyNumberString", + "StructPtrHeadNumberString", + "StructPtrHeadOmitEmptyNumberString", + "StructHeadIntPtr", + "StructHeadOmitEmptyIntPtr", + "StructPtrHeadIntPtr", + "StructPtrHeadOmitEmptyIntPtr", + "StructHeadUintPtr", + "StructHeadOmitEmptyUintPtr", + "StructPtrHeadUintPtr", + "StructPtrHeadOmitEmptyUintPtr", + "StructHeadFloat32Ptr", + "StructHeadOmitEmptyFloat32Ptr", + "StructPtrHeadFloat32Ptr", + "StructPtrHeadOmitEmptyFloat32Ptr", + "StructHeadFloat64Ptr", + "StructHeadOmitEmptyFloat64Ptr", + "StructPtrHeadFloat64Ptr", + "StructPtrHeadOmitEmptyFloat64Ptr", + "StructHeadBoolPtr", + "StructHeadOmitEmptyBoolPtr", + "StructPtrHeadBoolPtr", + "StructPtrHeadOmitEmptyBoolPtr", + "StructHeadStringPtr", + "StructHeadOmitEmptyStringPtr", + "StructPtrHeadStringPtr", + "StructPtrHeadOmitEmptyStringPtr", + "StructHeadBytesPtr", + "StructHeadOmitEmptyBytesPtr", + "StructPtrHeadBytesPtr", + "StructPtrHeadOmitEmptyBytesPtr", + "StructHeadNumberPtr", + "StructHeadOmitEmptyNumberPtr", + "StructPtrHeadNumberPtr", + "StructPtrHeadOmitEmptyNumberPtr", + "StructHeadArrayPtr", + "StructHeadOmitEmptyArrayPtr", + "StructPtrHeadArrayPtr", + "StructPtrHeadOmitEmptyArrayPtr", + "StructHeadMapPtr", + "StructHeadOmitEmptyMapPtr", + "StructPtrHeadMapPtr", + "StructPtrHeadOmitEmptyMapPtr", + "StructHeadSlicePtr", + "StructHeadOmitEmptySlicePtr", + "StructPtrHeadSlicePtr", + "StructPtrHeadOmitEmptySlicePtr", + "StructHeadMarshalJSONPtr", + "StructHeadOmitEmptyMarshalJSONPtr", + "StructPtrHeadMarshalJSONPtr", + "StructPtrHeadOmitEmptyMarshalJSONPtr", + "StructHeadMarshalTextPtr", + "StructHeadOmitEmptyMarshalTextPtr", + "StructPtrHeadMarshalTextPtr", + "StructPtrHeadOmitEmptyMarshalTextPtr", + "StructHeadInterfacePtr", + "StructHeadOmitEmptyInterfacePtr", + "StructPtrHeadInterfacePtr", + "StructPtrHeadOmitEmptyInterfacePtr", + "StructHeadIntPtrString", + "StructHeadOmitEmptyIntPtrString", + "StructPtrHeadIntPtrString", + "StructPtrHeadOmitEmptyIntPtrString", + "StructHeadUintPtrString", + "StructHeadOmitEmptyUintPtrString", + "StructPtrHeadUintPtrString", + "StructPtrHeadOmitEmptyUintPtrString", + "StructHeadFloat32PtrString", + "StructHeadOmitEmptyFloat32PtrString", + "StructPtrHeadFloat32PtrString", + "StructPtrHeadOmitEmptyFloat32PtrString", + "StructHeadFloat64PtrString", + "StructHeadOmitEmptyFloat64PtrString", + "StructPtrHeadFloat64PtrString", + "StructPtrHeadOmitEmptyFloat64PtrString", + "StructHeadBoolPtrString", + "StructHeadOmitEmptyBoolPtrString", + "StructPtrHeadBoolPtrString", + "StructPtrHeadOmitEmptyBoolPtrString", + "StructHeadStringPtrString", + "StructHeadOmitEmptyStringPtrString", + "StructPtrHeadStringPtrString", + "StructPtrHeadOmitEmptyStringPtrString", + "StructHeadNumberPtrString", + "StructHeadOmitEmptyNumberPtrString", + "StructPtrHeadNumberPtrString", + "StructPtrHeadOmitEmptyNumberPtrString", + "StructHead", + "StructHeadOmitEmpty", + "StructPtrHead", + "StructPtrHeadOmitEmpty", + "StructFieldInt", + "StructFieldOmitEmptyInt", + "StructEndInt", + "StructEndOmitEmptyInt", + "StructFieldUint", + "StructFieldOmitEmptyUint", + "StructEndUint", + "StructEndOmitEmptyUint", + "StructFieldFloat32", + "StructFieldOmitEmptyFloat32", + "StructEndFloat32", + "StructEndOmitEmptyFloat32", + "StructFieldFloat64", + "StructFieldOmitEmptyFloat64", + "StructEndFloat64", + "StructEndOmitEmptyFloat64", + "StructFieldBool", + "StructFieldOmitEmptyBool", + 
"StructEndBool", + "StructEndOmitEmptyBool", + "StructFieldString", + "StructFieldOmitEmptyString", + "StructEndString", + "StructEndOmitEmptyString", + "StructFieldBytes", + "StructFieldOmitEmptyBytes", + "StructEndBytes", + "StructEndOmitEmptyBytes", + "StructFieldNumber", + "StructFieldOmitEmptyNumber", + "StructEndNumber", + "StructEndOmitEmptyNumber", + "StructFieldArray", + "StructFieldOmitEmptyArray", + "StructEndArray", + "StructEndOmitEmptyArray", + "StructFieldMap", + "StructFieldOmitEmptyMap", + "StructEndMap", + "StructEndOmitEmptyMap", + "StructFieldSlice", + "StructFieldOmitEmptySlice", + "StructEndSlice", + "StructEndOmitEmptySlice", + "StructFieldStruct", + "StructFieldOmitEmptyStruct", + "StructEndStruct", + "StructEndOmitEmptyStruct", + "StructFieldMarshalJSON", + "StructFieldOmitEmptyMarshalJSON", + "StructEndMarshalJSON", + "StructEndOmitEmptyMarshalJSON", + "StructFieldMarshalText", + "StructFieldOmitEmptyMarshalText", + "StructEndMarshalText", + "StructEndOmitEmptyMarshalText", + "StructFieldIntString", + "StructFieldOmitEmptyIntString", + "StructEndIntString", + "StructEndOmitEmptyIntString", + "StructFieldUintString", + "StructFieldOmitEmptyUintString", + "StructEndUintString", + "StructEndOmitEmptyUintString", + "StructFieldFloat32String", + "StructFieldOmitEmptyFloat32String", + "StructEndFloat32String", + "StructEndOmitEmptyFloat32String", + "StructFieldFloat64String", + "StructFieldOmitEmptyFloat64String", + "StructEndFloat64String", + "StructEndOmitEmptyFloat64String", + "StructFieldBoolString", + "StructFieldOmitEmptyBoolString", + "StructEndBoolString", + "StructEndOmitEmptyBoolString", + "StructFieldStringString", + "StructFieldOmitEmptyStringString", + "StructEndStringString", + "StructEndOmitEmptyStringString", + "StructFieldNumberString", + "StructFieldOmitEmptyNumberString", + "StructEndNumberString", + "StructEndOmitEmptyNumberString", + "StructFieldIntPtr", + "StructFieldOmitEmptyIntPtr", + "StructEndIntPtr", + "StructEndOmitEmptyIntPtr", + "StructFieldUintPtr", + "StructFieldOmitEmptyUintPtr", + "StructEndUintPtr", + "StructEndOmitEmptyUintPtr", + "StructFieldFloat32Ptr", + "StructFieldOmitEmptyFloat32Ptr", + "StructEndFloat32Ptr", + "StructEndOmitEmptyFloat32Ptr", + "StructFieldFloat64Ptr", + "StructFieldOmitEmptyFloat64Ptr", + "StructEndFloat64Ptr", + "StructEndOmitEmptyFloat64Ptr", + "StructFieldBoolPtr", + "StructFieldOmitEmptyBoolPtr", + "StructEndBoolPtr", + "StructEndOmitEmptyBoolPtr", + "StructFieldStringPtr", + "StructFieldOmitEmptyStringPtr", + "StructEndStringPtr", + "StructEndOmitEmptyStringPtr", + "StructFieldBytesPtr", + "StructFieldOmitEmptyBytesPtr", + "StructEndBytesPtr", + "StructEndOmitEmptyBytesPtr", + "StructFieldNumberPtr", + "StructFieldOmitEmptyNumberPtr", + "StructEndNumberPtr", + "StructEndOmitEmptyNumberPtr", + "StructFieldArrayPtr", + "StructFieldOmitEmptyArrayPtr", + "StructEndArrayPtr", + "StructEndOmitEmptyArrayPtr", + "StructFieldMapPtr", + "StructFieldOmitEmptyMapPtr", + "StructEndMapPtr", + "StructEndOmitEmptyMapPtr", + "StructFieldSlicePtr", + "StructFieldOmitEmptySlicePtr", + "StructEndSlicePtr", + "StructEndOmitEmptySlicePtr", + "StructFieldMarshalJSONPtr", + "StructFieldOmitEmptyMarshalJSONPtr", + "StructEndMarshalJSONPtr", + "StructEndOmitEmptyMarshalJSONPtr", + "StructFieldMarshalTextPtr", + "StructFieldOmitEmptyMarshalTextPtr", + "StructEndMarshalTextPtr", + "StructEndOmitEmptyMarshalTextPtr", + "StructFieldInterfacePtr", + "StructFieldOmitEmptyInterfacePtr", + "StructEndInterfacePtr", + 
"StructEndOmitEmptyInterfacePtr", + "StructFieldIntPtrString", + "StructFieldOmitEmptyIntPtrString", + "StructEndIntPtrString", + "StructEndOmitEmptyIntPtrString", + "StructFieldUintPtrString", + "StructFieldOmitEmptyUintPtrString", + "StructEndUintPtrString", + "StructEndOmitEmptyUintPtrString", + "StructFieldFloat32PtrString", + "StructFieldOmitEmptyFloat32PtrString", + "StructEndFloat32PtrString", + "StructEndOmitEmptyFloat32PtrString", + "StructFieldFloat64PtrString", + "StructFieldOmitEmptyFloat64PtrString", + "StructEndFloat64PtrString", + "StructEndOmitEmptyFloat64PtrString", + "StructFieldBoolPtrString", + "StructFieldOmitEmptyBoolPtrString", + "StructEndBoolPtrString", + "StructEndOmitEmptyBoolPtrString", + "StructFieldStringPtrString", + "StructFieldOmitEmptyStringPtrString", + "StructEndStringPtrString", + "StructEndOmitEmptyStringPtrString", + "StructFieldNumberPtrString", + "StructFieldOmitEmptyNumberPtrString", + "StructEndNumberPtrString", + "StructEndOmitEmptyNumberPtrString", + "StructField", + "StructFieldOmitEmpty", + "StructEnd", + "StructEndOmitEmpty", +} + +type OpType uint16 + +const ( + OpEnd OpType = 0 + OpInterface OpType = 1 + OpPtr OpType = 2 + OpSliceElem OpType = 3 + OpSliceEnd OpType = 4 + OpArrayElem OpType = 5 + OpArrayEnd OpType = 6 + OpMapKey OpType = 7 + OpMapValue OpType = 8 + OpMapEnd OpType = 9 + OpRecursive OpType = 10 + OpRecursivePtr OpType = 11 + OpRecursiveEnd OpType = 12 + OpInterfaceEnd OpType = 13 + OpInt OpType = 14 + OpUint OpType = 15 + OpFloat32 OpType = 16 + OpFloat64 OpType = 17 + OpBool OpType = 18 + OpString OpType = 19 + OpBytes OpType = 20 + OpNumber OpType = 21 + OpArray OpType = 22 + OpMap OpType = 23 + OpSlice OpType = 24 + OpStruct OpType = 25 + OpMarshalJSON OpType = 26 + OpMarshalText OpType = 27 + OpIntString OpType = 28 + OpUintString OpType = 29 + OpFloat32String OpType = 30 + OpFloat64String OpType = 31 + OpBoolString OpType = 32 + OpStringString OpType = 33 + OpNumberString OpType = 34 + OpIntPtr OpType = 35 + OpUintPtr OpType = 36 + OpFloat32Ptr OpType = 37 + OpFloat64Ptr OpType = 38 + OpBoolPtr OpType = 39 + OpStringPtr OpType = 40 + OpBytesPtr OpType = 41 + OpNumberPtr OpType = 42 + OpArrayPtr OpType = 43 + OpMapPtr OpType = 44 + OpSlicePtr OpType = 45 + OpMarshalJSONPtr OpType = 46 + OpMarshalTextPtr OpType = 47 + OpInterfacePtr OpType = 48 + OpIntPtrString OpType = 49 + OpUintPtrString OpType = 50 + OpFloat32PtrString OpType = 51 + OpFloat64PtrString OpType = 52 + OpBoolPtrString OpType = 53 + OpStringPtrString OpType = 54 + OpNumberPtrString OpType = 55 + OpStructHeadInt OpType = 56 + OpStructHeadOmitEmptyInt OpType = 57 + OpStructPtrHeadInt OpType = 58 + OpStructPtrHeadOmitEmptyInt OpType = 59 + OpStructHeadUint OpType = 60 + OpStructHeadOmitEmptyUint OpType = 61 + OpStructPtrHeadUint OpType = 62 + OpStructPtrHeadOmitEmptyUint OpType = 63 + OpStructHeadFloat32 OpType = 64 + OpStructHeadOmitEmptyFloat32 OpType = 65 + OpStructPtrHeadFloat32 OpType = 66 + OpStructPtrHeadOmitEmptyFloat32 OpType = 67 + OpStructHeadFloat64 OpType = 68 + OpStructHeadOmitEmptyFloat64 OpType = 69 + OpStructPtrHeadFloat64 OpType = 70 + OpStructPtrHeadOmitEmptyFloat64 OpType = 71 + OpStructHeadBool OpType = 72 + OpStructHeadOmitEmptyBool OpType = 73 + OpStructPtrHeadBool OpType = 74 + OpStructPtrHeadOmitEmptyBool OpType = 75 + OpStructHeadString OpType = 76 + OpStructHeadOmitEmptyString OpType = 77 + OpStructPtrHeadString OpType = 78 + OpStructPtrHeadOmitEmptyString OpType = 79 + OpStructHeadBytes OpType = 80 + OpStructHeadOmitEmptyBytes 
OpType = 81 + OpStructPtrHeadBytes OpType = 82 + OpStructPtrHeadOmitEmptyBytes OpType = 83 + OpStructHeadNumber OpType = 84 + OpStructHeadOmitEmptyNumber OpType = 85 + OpStructPtrHeadNumber OpType = 86 + OpStructPtrHeadOmitEmptyNumber OpType = 87 + OpStructHeadArray OpType = 88 + OpStructHeadOmitEmptyArray OpType = 89 + OpStructPtrHeadArray OpType = 90 + OpStructPtrHeadOmitEmptyArray OpType = 91 + OpStructHeadMap OpType = 92 + OpStructHeadOmitEmptyMap OpType = 93 + OpStructPtrHeadMap OpType = 94 + OpStructPtrHeadOmitEmptyMap OpType = 95 + OpStructHeadSlice OpType = 96 + OpStructHeadOmitEmptySlice OpType = 97 + OpStructPtrHeadSlice OpType = 98 + OpStructPtrHeadOmitEmptySlice OpType = 99 + OpStructHeadStruct OpType = 100 + OpStructHeadOmitEmptyStruct OpType = 101 + OpStructPtrHeadStruct OpType = 102 + OpStructPtrHeadOmitEmptyStruct OpType = 103 + OpStructHeadMarshalJSON OpType = 104 + OpStructHeadOmitEmptyMarshalJSON OpType = 105 + OpStructPtrHeadMarshalJSON OpType = 106 + OpStructPtrHeadOmitEmptyMarshalJSON OpType = 107 + OpStructHeadMarshalText OpType = 108 + OpStructHeadOmitEmptyMarshalText OpType = 109 + OpStructPtrHeadMarshalText OpType = 110 + OpStructPtrHeadOmitEmptyMarshalText OpType = 111 + OpStructHeadIntString OpType = 112 + OpStructHeadOmitEmptyIntString OpType = 113 + OpStructPtrHeadIntString OpType = 114 + OpStructPtrHeadOmitEmptyIntString OpType = 115 + OpStructHeadUintString OpType = 116 + OpStructHeadOmitEmptyUintString OpType = 117 + OpStructPtrHeadUintString OpType = 118 + OpStructPtrHeadOmitEmptyUintString OpType = 119 + OpStructHeadFloat32String OpType = 120 + OpStructHeadOmitEmptyFloat32String OpType = 121 + OpStructPtrHeadFloat32String OpType = 122 + OpStructPtrHeadOmitEmptyFloat32String OpType = 123 + OpStructHeadFloat64String OpType = 124 + OpStructHeadOmitEmptyFloat64String OpType = 125 + OpStructPtrHeadFloat64String OpType = 126 + OpStructPtrHeadOmitEmptyFloat64String OpType = 127 + OpStructHeadBoolString OpType = 128 + OpStructHeadOmitEmptyBoolString OpType = 129 + OpStructPtrHeadBoolString OpType = 130 + OpStructPtrHeadOmitEmptyBoolString OpType = 131 + OpStructHeadStringString OpType = 132 + OpStructHeadOmitEmptyStringString OpType = 133 + OpStructPtrHeadStringString OpType = 134 + OpStructPtrHeadOmitEmptyStringString OpType = 135 + OpStructHeadNumberString OpType = 136 + OpStructHeadOmitEmptyNumberString OpType = 137 + OpStructPtrHeadNumberString OpType = 138 + OpStructPtrHeadOmitEmptyNumberString OpType = 139 + OpStructHeadIntPtr OpType = 140 + OpStructHeadOmitEmptyIntPtr OpType = 141 + OpStructPtrHeadIntPtr OpType = 142 + OpStructPtrHeadOmitEmptyIntPtr OpType = 143 + OpStructHeadUintPtr OpType = 144 + OpStructHeadOmitEmptyUintPtr OpType = 145 + OpStructPtrHeadUintPtr OpType = 146 + OpStructPtrHeadOmitEmptyUintPtr OpType = 147 + OpStructHeadFloat32Ptr OpType = 148 + OpStructHeadOmitEmptyFloat32Ptr OpType = 149 + OpStructPtrHeadFloat32Ptr OpType = 150 + OpStructPtrHeadOmitEmptyFloat32Ptr OpType = 151 + OpStructHeadFloat64Ptr OpType = 152 + OpStructHeadOmitEmptyFloat64Ptr OpType = 153 + OpStructPtrHeadFloat64Ptr OpType = 154 + OpStructPtrHeadOmitEmptyFloat64Ptr OpType = 155 + OpStructHeadBoolPtr OpType = 156 + OpStructHeadOmitEmptyBoolPtr OpType = 157 + OpStructPtrHeadBoolPtr OpType = 158 + OpStructPtrHeadOmitEmptyBoolPtr OpType = 159 + OpStructHeadStringPtr OpType = 160 + OpStructHeadOmitEmptyStringPtr OpType = 161 + OpStructPtrHeadStringPtr OpType = 162 + OpStructPtrHeadOmitEmptyStringPtr OpType = 163 + OpStructHeadBytesPtr OpType = 164 + 
OpStructHeadOmitEmptyBytesPtr OpType = 165 + OpStructPtrHeadBytesPtr OpType = 166 + OpStructPtrHeadOmitEmptyBytesPtr OpType = 167 + OpStructHeadNumberPtr OpType = 168 + OpStructHeadOmitEmptyNumberPtr OpType = 169 + OpStructPtrHeadNumberPtr OpType = 170 + OpStructPtrHeadOmitEmptyNumberPtr OpType = 171 + OpStructHeadArrayPtr OpType = 172 + OpStructHeadOmitEmptyArrayPtr OpType = 173 + OpStructPtrHeadArrayPtr OpType = 174 + OpStructPtrHeadOmitEmptyArrayPtr OpType = 175 + OpStructHeadMapPtr OpType = 176 + OpStructHeadOmitEmptyMapPtr OpType = 177 + OpStructPtrHeadMapPtr OpType = 178 + OpStructPtrHeadOmitEmptyMapPtr OpType = 179 + OpStructHeadSlicePtr OpType = 180 + OpStructHeadOmitEmptySlicePtr OpType = 181 + OpStructPtrHeadSlicePtr OpType = 182 + OpStructPtrHeadOmitEmptySlicePtr OpType = 183 + OpStructHeadMarshalJSONPtr OpType = 184 + OpStructHeadOmitEmptyMarshalJSONPtr OpType = 185 + OpStructPtrHeadMarshalJSONPtr OpType = 186 + OpStructPtrHeadOmitEmptyMarshalJSONPtr OpType = 187 + OpStructHeadMarshalTextPtr OpType = 188 + OpStructHeadOmitEmptyMarshalTextPtr OpType = 189 + OpStructPtrHeadMarshalTextPtr OpType = 190 + OpStructPtrHeadOmitEmptyMarshalTextPtr OpType = 191 + OpStructHeadInterfacePtr OpType = 192 + OpStructHeadOmitEmptyInterfacePtr OpType = 193 + OpStructPtrHeadInterfacePtr OpType = 194 + OpStructPtrHeadOmitEmptyInterfacePtr OpType = 195 + OpStructHeadIntPtrString OpType = 196 + OpStructHeadOmitEmptyIntPtrString OpType = 197 + OpStructPtrHeadIntPtrString OpType = 198 + OpStructPtrHeadOmitEmptyIntPtrString OpType = 199 + OpStructHeadUintPtrString OpType = 200 + OpStructHeadOmitEmptyUintPtrString OpType = 201 + OpStructPtrHeadUintPtrString OpType = 202 + OpStructPtrHeadOmitEmptyUintPtrString OpType = 203 + OpStructHeadFloat32PtrString OpType = 204 + OpStructHeadOmitEmptyFloat32PtrString OpType = 205 + OpStructPtrHeadFloat32PtrString OpType = 206 + OpStructPtrHeadOmitEmptyFloat32PtrString OpType = 207 + OpStructHeadFloat64PtrString OpType = 208 + OpStructHeadOmitEmptyFloat64PtrString OpType = 209 + OpStructPtrHeadFloat64PtrString OpType = 210 + OpStructPtrHeadOmitEmptyFloat64PtrString OpType = 211 + OpStructHeadBoolPtrString OpType = 212 + OpStructHeadOmitEmptyBoolPtrString OpType = 213 + OpStructPtrHeadBoolPtrString OpType = 214 + OpStructPtrHeadOmitEmptyBoolPtrString OpType = 215 + OpStructHeadStringPtrString OpType = 216 + OpStructHeadOmitEmptyStringPtrString OpType = 217 + OpStructPtrHeadStringPtrString OpType = 218 + OpStructPtrHeadOmitEmptyStringPtrString OpType = 219 + OpStructHeadNumberPtrString OpType = 220 + OpStructHeadOmitEmptyNumberPtrString OpType = 221 + OpStructPtrHeadNumberPtrString OpType = 222 + OpStructPtrHeadOmitEmptyNumberPtrString OpType = 223 + OpStructHead OpType = 224 + OpStructHeadOmitEmpty OpType = 225 + OpStructPtrHead OpType = 226 + OpStructPtrHeadOmitEmpty OpType = 227 + OpStructFieldInt OpType = 228 + OpStructFieldOmitEmptyInt OpType = 229 + OpStructEndInt OpType = 230 + OpStructEndOmitEmptyInt OpType = 231 + OpStructFieldUint OpType = 232 + OpStructFieldOmitEmptyUint OpType = 233 + OpStructEndUint OpType = 234 + OpStructEndOmitEmptyUint OpType = 235 + OpStructFieldFloat32 OpType = 236 + OpStructFieldOmitEmptyFloat32 OpType = 237 + OpStructEndFloat32 OpType = 238 + OpStructEndOmitEmptyFloat32 OpType = 239 + OpStructFieldFloat64 OpType = 240 + OpStructFieldOmitEmptyFloat64 OpType = 241 + OpStructEndFloat64 OpType = 242 + OpStructEndOmitEmptyFloat64 OpType = 243 + OpStructFieldBool OpType = 244 + OpStructFieldOmitEmptyBool OpType = 245 + OpStructEndBool 
OpType = 246 + OpStructEndOmitEmptyBool OpType = 247 + OpStructFieldString OpType = 248 + OpStructFieldOmitEmptyString OpType = 249 + OpStructEndString OpType = 250 + OpStructEndOmitEmptyString OpType = 251 + OpStructFieldBytes OpType = 252 + OpStructFieldOmitEmptyBytes OpType = 253 + OpStructEndBytes OpType = 254 + OpStructEndOmitEmptyBytes OpType = 255 + OpStructFieldNumber OpType = 256 + OpStructFieldOmitEmptyNumber OpType = 257 + OpStructEndNumber OpType = 258 + OpStructEndOmitEmptyNumber OpType = 259 + OpStructFieldArray OpType = 260 + OpStructFieldOmitEmptyArray OpType = 261 + OpStructEndArray OpType = 262 + OpStructEndOmitEmptyArray OpType = 263 + OpStructFieldMap OpType = 264 + OpStructFieldOmitEmptyMap OpType = 265 + OpStructEndMap OpType = 266 + OpStructEndOmitEmptyMap OpType = 267 + OpStructFieldSlice OpType = 268 + OpStructFieldOmitEmptySlice OpType = 269 + OpStructEndSlice OpType = 270 + OpStructEndOmitEmptySlice OpType = 271 + OpStructFieldStruct OpType = 272 + OpStructFieldOmitEmptyStruct OpType = 273 + OpStructEndStruct OpType = 274 + OpStructEndOmitEmptyStruct OpType = 275 + OpStructFieldMarshalJSON OpType = 276 + OpStructFieldOmitEmptyMarshalJSON OpType = 277 + OpStructEndMarshalJSON OpType = 278 + OpStructEndOmitEmptyMarshalJSON OpType = 279 + OpStructFieldMarshalText OpType = 280 + OpStructFieldOmitEmptyMarshalText OpType = 281 + OpStructEndMarshalText OpType = 282 + OpStructEndOmitEmptyMarshalText OpType = 283 + OpStructFieldIntString OpType = 284 + OpStructFieldOmitEmptyIntString OpType = 285 + OpStructEndIntString OpType = 286 + OpStructEndOmitEmptyIntString OpType = 287 + OpStructFieldUintString OpType = 288 + OpStructFieldOmitEmptyUintString OpType = 289 + OpStructEndUintString OpType = 290 + OpStructEndOmitEmptyUintString OpType = 291 + OpStructFieldFloat32String OpType = 292 + OpStructFieldOmitEmptyFloat32String OpType = 293 + OpStructEndFloat32String OpType = 294 + OpStructEndOmitEmptyFloat32String OpType = 295 + OpStructFieldFloat64String OpType = 296 + OpStructFieldOmitEmptyFloat64String OpType = 297 + OpStructEndFloat64String OpType = 298 + OpStructEndOmitEmptyFloat64String OpType = 299 + OpStructFieldBoolString OpType = 300 + OpStructFieldOmitEmptyBoolString OpType = 301 + OpStructEndBoolString OpType = 302 + OpStructEndOmitEmptyBoolString OpType = 303 + OpStructFieldStringString OpType = 304 + OpStructFieldOmitEmptyStringString OpType = 305 + OpStructEndStringString OpType = 306 + OpStructEndOmitEmptyStringString OpType = 307 + OpStructFieldNumberString OpType = 308 + OpStructFieldOmitEmptyNumberString OpType = 309 + OpStructEndNumberString OpType = 310 + OpStructEndOmitEmptyNumberString OpType = 311 + OpStructFieldIntPtr OpType = 312 + OpStructFieldOmitEmptyIntPtr OpType = 313 + OpStructEndIntPtr OpType = 314 + OpStructEndOmitEmptyIntPtr OpType = 315 + OpStructFieldUintPtr OpType = 316 + OpStructFieldOmitEmptyUintPtr OpType = 317 + OpStructEndUintPtr OpType = 318 + OpStructEndOmitEmptyUintPtr OpType = 319 + OpStructFieldFloat32Ptr OpType = 320 + OpStructFieldOmitEmptyFloat32Ptr OpType = 321 + OpStructEndFloat32Ptr OpType = 322 + OpStructEndOmitEmptyFloat32Ptr OpType = 323 + OpStructFieldFloat64Ptr OpType = 324 + OpStructFieldOmitEmptyFloat64Ptr OpType = 325 + OpStructEndFloat64Ptr OpType = 326 + OpStructEndOmitEmptyFloat64Ptr OpType = 327 + OpStructFieldBoolPtr OpType = 328 + OpStructFieldOmitEmptyBoolPtr OpType = 329 + OpStructEndBoolPtr OpType = 330 + OpStructEndOmitEmptyBoolPtr OpType = 331 + OpStructFieldStringPtr OpType = 332 + 
OpStructFieldOmitEmptyStringPtr OpType = 333 + OpStructEndStringPtr OpType = 334 + OpStructEndOmitEmptyStringPtr OpType = 335 + OpStructFieldBytesPtr OpType = 336 + OpStructFieldOmitEmptyBytesPtr OpType = 337 + OpStructEndBytesPtr OpType = 338 + OpStructEndOmitEmptyBytesPtr OpType = 339 + OpStructFieldNumberPtr OpType = 340 + OpStructFieldOmitEmptyNumberPtr OpType = 341 + OpStructEndNumberPtr OpType = 342 + OpStructEndOmitEmptyNumberPtr OpType = 343 + OpStructFieldArrayPtr OpType = 344 + OpStructFieldOmitEmptyArrayPtr OpType = 345 + OpStructEndArrayPtr OpType = 346 + OpStructEndOmitEmptyArrayPtr OpType = 347 + OpStructFieldMapPtr OpType = 348 + OpStructFieldOmitEmptyMapPtr OpType = 349 + OpStructEndMapPtr OpType = 350 + OpStructEndOmitEmptyMapPtr OpType = 351 + OpStructFieldSlicePtr OpType = 352 + OpStructFieldOmitEmptySlicePtr OpType = 353 + OpStructEndSlicePtr OpType = 354 + OpStructEndOmitEmptySlicePtr OpType = 355 + OpStructFieldMarshalJSONPtr OpType = 356 + OpStructFieldOmitEmptyMarshalJSONPtr OpType = 357 + OpStructEndMarshalJSONPtr OpType = 358 + OpStructEndOmitEmptyMarshalJSONPtr OpType = 359 + OpStructFieldMarshalTextPtr OpType = 360 + OpStructFieldOmitEmptyMarshalTextPtr OpType = 361 + OpStructEndMarshalTextPtr OpType = 362 + OpStructEndOmitEmptyMarshalTextPtr OpType = 363 + OpStructFieldInterfacePtr OpType = 364 + OpStructFieldOmitEmptyInterfacePtr OpType = 365 + OpStructEndInterfacePtr OpType = 366 + OpStructEndOmitEmptyInterfacePtr OpType = 367 + OpStructFieldIntPtrString OpType = 368 + OpStructFieldOmitEmptyIntPtrString OpType = 369 + OpStructEndIntPtrString OpType = 370 + OpStructEndOmitEmptyIntPtrString OpType = 371 + OpStructFieldUintPtrString OpType = 372 + OpStructFieldOmitEmptyUintPtrString OpType = 373 + OpStructEndUintPtrString OpType = 374 + OpStructEndOmitEmptyUintPtrString OpType = 375 + OpStructFieldFloat32PtrString OpType = 376 + OpStructFieldOmitEmptyFloat32PtrString OpType = 377 + OpStructEndFloat32PtrString OpType = 378 + OpStructEndOmitEmptyFloat32PtrString OpType = 379 + OpStructFieldFloat64PtrString OpType = 380 + OpStructFieldOmitEmptyFloat64PtrString OpType = 381 + OpStructEndFloat64PtrString OpType = 382 + OpStructEndOmitEmptyFloat64PtrString OpType = 383 + OpStructFieldBoolPtrString OpType = 384 + OpStructFieldOmitEmptyBoolPtrString OpType = 385 + OpStructEndBoolPtrString OpType = 386 + OpStructEndOmitEmptyBoolPtrString OpType = 387 + OpStructFieldStringPtrString OpType = 388 + OpStructFieldOmitEmptyStringPtrString OpType = 389 + OpStructEndStringPtrString OpType = 390 + OpStructEndOmitEmptyStringPtrString OpType = 391 + OpStructFieldNumberPtrString OpType = 392 + OpStructFieldOmitEmptyNumberPtrString OpType = 393 + OpStructEndNumberPtrString OpType = 394 + OpStructEndOmitEmptyNumberPtrString OpType = 395 + OpStructField OpType = 396 + OpStructFieldOmitEmpty OpType = 397 + OpStructEnd OpType = 398 + OpStructEndOmitEmpty OpType = 399 +) + +func (t OpType) String() string { + if int(t) >= 400 { + return "" + } + return opTypeStrings[int(t)] +} + +func (t OpType) CodeType() CodeType { + if strings.Contains(t.String(), "Struct") { + if strings.Contains(t.String(), "End") { + return CodeStructEnd + } + return CodeStructField + } + switch t { + case OpArray, OpArrayPtr: + return CodeArrayHead + case OpArrayElem: + return CodeArrayElem + case OpSlice, OpSlicePtr: + return CodeSliceHead + case OpSliceElem: + return CodeSliceElem + case OpMap, OpMapPtr: + return CodeMapHead + case OpMapKey: + return CodeMapKey + case OpMapValue: + return CodeMapValue + case 
OpMapEnd: + return CodeMapEnd + } + + return CodeOp +} + +func (t OpType) HeadToPtrHead() OpType { + if strings.Index(t.String(), "PtrHead") > 0 { + return t + } + + idx := strings.Index(t.String(), "Head") + if idx == -1 { + return t + } + suffix := "PtrHead" + t.String()[idx+len("Head"):] + + const toPtrOffset = 2 + if strings.Contains(OpType(int(t)+toPtrOffset).String(), suffix) { + return OpType(int(t) + toPtrOffset) + } + return t +} + +func (t OpType) HeadToOmitEmptyHead() OpType { + const toOmitEmptyOffset = 1 + if strings.Contains(OpType(int(t)+toOmitEmptyOffset).String(), "OmitEmpty") { + return OpType(int(t) + toOmitEmptyOffset) + } + + return t +} + +func (t OpType) PtrHeadToHead() OpType { + idx := strings.Index(t.String(), "PtrHead") + if idx == -1 { + return t + } + suffix := t.String()[idx+len("Ptr"):] + + const toPtrOffset = 2 + if strings.Contains(OpType(int(t)-toPtrOffset).String(), suffix) { + return OpType(int(t) - toPtrOffset) + } + return t +} + +func (t OpType) FieldToEnd() OpType { + idx := strings.Index(t.String(), "Field") + if idx == -1 { + return t + } + suffix := t.String()[idx+len("Field"):] + if suffix == "" || suffix == "OmitEmpty" { + return t + } + const toEndOffset = 2 + if strings.Contains(OpType(int(t)+toEndOffset).String(), "End"+suffix) { + return OpType(int(t) + toEndOffset) + } + return t +} + +func (t OpType) FieldToOmitEmptyField() OpType { + const toOmitEmptyOffset = 1 + if strings.Contains(OpType(int(t)+toOmitEmptyOffset).String(), "OmitEmpty") { + return OpType(int(t) + toOmitEmptyOffset) + } + return t +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/query.go b/vendor/github.com/goccy/go-json/internal/encoder/query.go new file mode 100644 index 0000000000000..1e1850cc153d0 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/query.go @@ -0,0 +1,135 @@ +package encoder + +import ( + "context" + "fmt" + "reflect" +) + +var ( + Marshal func(interface{}) ([]byte, error) + Unmarshal func([]byte, interface{}) error +) + +type FieldQuery struct { + Name string + Fields []*FieldQuery + hash string +} + +func (q *FieldQuery) Hash() string { + if q.hash != "" { + return q.hash + } + b, _ := Marshal(q) + q.hash = string(b) + return q.hash +} + +func (q *FieldQuery) MarshalJSON() ([]byte, error) { + if q.Name != "" { + if len(q.Fields) > 0 { + return Marshal(map[string][]*FieldQuery{q.Name: q.Fields}) + } + return Marshal(q.Name) + } + return Marshal(q.Fields) +} + +func (q *FieldQuery) QueryString() (FieldQueryString, error) { + b, err := Marshal(q) + if err != nil { + return "", err + } + return FieldQueryString(b), nil +} + +type FieldQueryString string + +func (s FieldQueryString) Build() (*FieldQuery, error) { + var query interface{} + if err := Unmarshal([]byte(s), &query); err != nil { + return nil, err + } + return s.build(reflect.ValueOf(query)) +} + +func (s FieldQueryString) build(v reflect.Value) (*FieldQuery, error) { + switch v.Type().Kind() { + case reflect.String: + return s.buildString(v) + case reflect.Map: + return s.buildMap(v) + case reflect.Slice: + return s.buildSlice(v) + case reflect.Interface: + return s.build(reflect.ValueOf(v.Interface())) + } + return nil, fmt.Errorf("failed to build field query") +} + +func (s FieldQueryString) buildString(v reflect.Value) (*FieldQuery, error) { + b := []byte(v.String()) + switch b[0] { + case '[', '{': + var query interface{} + if err := Unmarshal(b, &query); err != nil { + return nil, err + } + if str, ok := query.(string); ok { + return &FieldQuery{Name: 
str}, nil + } + return s.build(reflect.ValueOf(query)) + } + return &FieldQuery{Name: string(b)}, nil +} + +func (s FieldQueryString) buildSlice(v reflect.Value) (*FieldQuery, error) { + fields := make([]*FieldQuery, 0, v.Len()) + for i := 0; i < v.Len(); i++ { + def, err := s.build(v.Index(i)) + if err != nil { + return nil, err + } + fields = append(fields, def) + } + return &FieldQuery{Fields: fields}, nil +} + +func (s FieldQueryString) buildMap(v reflect.Value) (*FieldQuery, error) { + keys := v.MapKeys() + if len(keys) != 1 { + return nil, fmt.Errorf("failed to build field query object") + } + key := keys[0] + if key.Type().Kind() != reflect.String { + return nil, fmt.Errorf("failed to build field query. invalid object key type") + } + name := key.String() + def, err := s.build(v.MapIndex(key)) + if err != nil { + return nil, err + } + return &FieldQuery{ + Name: name, + Fields: def.Fields, + }, nil +} + +type queryKey struct{} + +func FieldQueryFromContext(ctx context.Context) *FieldQuery { + query := ctx.Value(queryKey{}) + if query == nil { + return nil + } + q, ok := query.(*FieldQuery) + if !ok { + return nil + } + return q +} + +func SetFieldQueryToContext(ctx context.Context, query *FieldQuery) context.Context { + return context.WithValue(ctx, queryKey{}, query) +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/string.go b/vendor/github.com/goccy/go-json/internal/encoder/string.go new file mode 100644 index 0000000000000..4abb84165e9cb --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/string.go @@ -0,0 +1,483 @@ +// This files's string processing codes are inspired by https://github.com/segmentio/encoding. +// The license notation is as follows. +// +// # MIT License +// +// Copyright (c) 2019 Segment.io, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. 
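For orientation on query.go above: a FieldQueryString is plain JSON describing which fields to encode, and build walks it recursively, so a string becomes a leaf query, an array becomes a list of sub-queries, and a single-key object nests a field name over its children. The standalone sketch below mirrors that recursion using only encoding/json, since the vendored internal package wires its Marshal/Unmarshal hooks elsewhere and cannot be imported directly; the Node type and build helper here are illustrative stand-ins, not symbols from this patch.

package main

import (
	"encoding/json"
	"fmt"
)

// Node mirrors encoder.FieldQuery: an optional field name plus child selections.
type Node struct {
	Name   string
	Fields []*Node
}

// build follows the same shape as FieldQueryString.build: strings are leaves,
// slices are field lists, and a one-key object nests a name over its children.
func build(v interface{}) (*Node, error) {
	switch t := v.(type) {
	case string:
		return &Node{Name: t}, nil
	case []interface{}:
		n := &Node{}
		for _, e := range t {
			c, err := build(e)
			if err != nil {
				return nil, err
			}
			n.Fields = append(n.Fields, c)
		}
		return n, nil
	case map[string]interface{}:
		if len(t) != 1 {
			return nil, fmt.Errorf("field query object must have exactly one key")
		}
		for k, e := range t {
			c, err := build(e)
			if err != nil {
				return nil, err
			}
			return &Node{Name: k, Fields: c.Fields}, nil
		}
	}
	return nil, fmt.Errorf("unsupported field query element %T", v)
}

func main() {
	var raw interface{}
	// Select the "name" field plus the nested "city" field of "address".
	if err := json.Unmarshal([]byte(`["name",{"address":["city"]}]`), &raw); err != nil {
		panic(err)
	}
	q, err := build(raw)
	if err != nil {
		panic(err)
	}
	fmt.Println(q.Fields[0].Name)                          // name
	fmt.Println(q.Fields[1].Name, len(q.Fields[1].Fields)) // address 1
}

FieldQueryFromContext and SetFieldQueryToContext at the end of query.go are how such a query is carried through a context.Context to the encoder.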
+package encoder + +import ( + "math/bits" + "reflect" + "unsafe" +) + +const ( + lsb = 0x0101010101010101 + msb = 0x8080808080808080 +) + +var hex = "0123456789abcdef" + +//nolint:govet +func stringToUint64Slice(s string) []uint64 { + return *(*[]uint64)(unsafe.Pointer(&reflect.SliceHeader{ + Data: ((*reflect.StringHeader)(unsafe.Pointer(&s))).Data, + Len: len(s) / 8, + Cap: len(s) / 8, + })) +} + +func AppendString(ctx *RuntimeContext, buf []byte, s string) []byte { + if ctx.Option.Flag&HTMLEscapeOption != 0 { + if ctx.Option.Flag&NormalizeUTF8Option != 0 { + return appendNormalizedHTMLString(buf, s) + } + return appendHTMLString(buf, s) + } + if ctx.Option.Flag&NormalizeUTF8Option != 0 { + return appendNormalizedString(buf, s) + } + return appendString(buf, s) +} + +func appendNormalizedHTMLString(buf []byte, s string) []byte { + valLen := len(s) + if valLen == 0 { + return append(buf, `""`...) + } + buf = append(buf, '"') + var ( + i, j int + ) + if valLen >= 8 { + chunks := stringToUint64Slice(s) + for _, n := range chunks { + // combine masks before checking for the MSB of each byte. We include + // `n` in the mask to check whether any of the *input* byte MSBs were + // set (i.e. the byte was outside the ASCII range). + mask := n | (n - (lsb * 0x20)) | + ((n ^ (lsb * '"')) - lsb) | + ((n ^ (lsb * '\\')) - lsb) | + ((n ^ (lsb * '<')) - lsb) | + ((n ^ (lsb * '>')) - lsb) | + ((n ^ (lsb * '&')) - lsb) + if (mask & msb) != 0 { + j = bits.TrailingZeros64(mask&msb) / 8 + goto ESCAPE_END + } + } + for i := len(chunks) * 8; i < valLen; i++ { + if needEscapeHTMLNormalizeUTF8[s[i]] { + j = i + goto ESCAPE_END + } + } + // no found any escape characters. + return append(append(buf, s...), '"') + } +ESCAPE_END: + for j < valLen { + c := s[j] + + if !needEscapeHTMLNormalizeUTF8[c] { + // fast path: most of the time, printable ascii characters are used + j++ + continue + } + + switch c { + case '\\', '"': + buf = append(buf, s[i:j]...) + buf = append(buf, '\\', c) + i = j + 1 + j = j + 1 + continue + + case '\n': + buf = append(buf, s[i:j]...) + buf = append(buf, '\\', 'n') + i = j + 1 + j = j + 1 + continue + + case '\r': + buf = append(buf, s[i:j]...) + buf = append(buf, '\\', 'r') + i = j + 1 + j = j + 1 + continue + + case '\t': + buf = append(buf, s[i:j]...) + buf = append(buf, '\\', 't') + i = j + 1 + j = j + 1 + continue + + case '<', '>', '&': + buf = append(buf, s[i:j]...) + buf = append(buf, `\u00`...) + buf = append(buf, hex[c>>4], hex[c&0xF]) + i = j + 1 + j = j + 1 + continue + + case 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x0B, 0x0C, 0x0E, 0x0F, // 0x00-0x0F + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F: // 0x10-0x1F + buf = append(buf, s[i:j]...) + buf = append(buf, `\u00`...) + buf = append(buf, hex[c>>4], hex[c&0xF]) + i = j + 1 + j = j + 1 + continue + } + state, size := decodeRuneInString(s[j:]) + switch state { + case runeErrorState: + buf = append(buf, s[i:j]...) + buf = append(buf, `\ufffd`...) + i = j + 1 + j = j + 1 + continue + // U+2028 is LINE SEPARATOR. + // U+2029 is PARAGRAPH SEPARATOR. + // They are both technically valid characters in JSON strings, + // but don't work in JSONP, which has to be evaluated as JavaScript, + // and can lead to security holes there. It is valid JSON to + // escape them, so we do so unconditionally. + // See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion. + case lineSepState: + buf = append(buf, s[i:j]...) + buf = append(buf, `\u2028`...) 
+ i = j + 3 + j = j + 3 + continue + case paragraphSepState: + buf = append(buf, s[i:j]...) + buf = append(buf, `\u2029`...) + i = j + 3 + j = j + 3 + continue + } + j += size + } + + return append(append(buf, s[i:]...), '"') +} + +func appendHTMLString(buf []byte, s string) []byte { + valLen := len(s) + if valLen == 0 { + return append(buf, `""`...) + } + buf = append(buf, '"') + var ( + i, j int + ) + if valLen >= 8 { + chunks := stringToUint64Slice(s) + for _, n := range chunks { + // combine masks before checking for the MSB of each byte. We include + // `n` in the mask to check whether any of the *input* byte MSBs were + // set (i.e. the byte was outside the ASCII range). + mask := n | (n - (lsb * 0x20)) | + ((n ^ (lsb * '"')) - lsb) | + ((n ^ (lsb * '\\')) - lsb) | + ((n ^ (lsb * '<')) - lsb) | + ((n ^ (lsb * '>')) - lsb) | + ((n ^ (lsb * '&')) - lsb) + if (mask & msb) != 0 { + j = bits.TrailingZeros64(mask&msb) / 8 + goto ESCAPE_END + } + } + for i := len(chunks) * 8; i < valLen; i++ { + if needEscapeHTML[s[i]] { + j = i + goto ESCAPE_END + } + } + // no found any escape characters. + return append(append(buf, s...), '"') + } +ESCAPE_END: + for j < valLen { + c := s[j] + + if !needEscapeHTML[c] { + // fast path: most of the time, printable ascii characters are used + j++ + continue + } + + switch c { + case '\\', '"': + buf = append(buf, s[i:j]...) + buf = append(buf, '\\', c) + i = j + 1 + j = j + 1 + continue + + case '\n': + buf = append(buf, s[i:j]...) + buf = append(buf, '\\', 'n') + i = j + 1 + j = j + 1 + continue + + case '\r': + buf = append(buf, s[i:j]...) + buf = append(buf, '\\', 'r') + i = j + 1 + j = j + 1 + continue + + case '\t': + buf = append(buf, s[i:j]...) + buf = append(buf, '\\', 't') + i = j + 1 + j = j + 1 + continue + + case '<', '>', '&': + buf = append(buf, s[i:j]...) + buf = append(buf, `\u00`...) + buf = append(buf, hex[c>>4], hex[c&0xF]) + i = j + 1 + j = j + 1 + continue + + case 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x0B, 0x0C, 0x0E, 0x0F, // 0x00-0x0F + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F: // 0x10-0x1F + buf = append(buf, s[i:j]...) + buf = append(buf, `\u00`...) + buf = append(buf, hex[c>>4], hex[c&0xF]) + i = j + 1 + j = j + 1 + continue + } + j++ + } + + return append(append(buf, s[i:]...), '"') +} + +func appendNormalizedString(buf []byte, s string) []byte { + valLen := len(s) + if valLen == 0 { + return append(buf, `""`...) + } + buf = append(buf, '"') + var ( + i, j int + ) + if valLen >= 8 { + chunks := stringToUint64Slice(s) + for _, n := range chunks { + // combine masks before checking for the MSB of each byte. We include + // `n` in the mask to check whether any of the *input* byte MSBs were + // set (i.e. the byte was outside the ASCII range). + mask := n | (n - (lsb * 0x20)) | + ((n ^ (lsb * '"')) - lsb) | + ((n ^ (lsb * '\\')) - lsb) + if (mask & msb) != 0 { + j = bits.TrailingZeros64(mask&msb) / 8 + goto ESCAPE_END + } + } + valLen := len(s) + for i := len(chunks) * 8; i < valLen; i++ { + if needEscapeNormalizeUTF8[s[i]] { + j = i + goto ESCAPE_END + } + } + return append(append(buf, s...), '"') + } +ESCAPE_END: + for j < valLen { + c := s[j] + + if !needEscapeNormalizeUTF8[c] { + // fast path: most of the time, printable ascii characters are used + j++ + continue + } + + switch c { + case '\\', '"': + buf = append(buf, s[i:j]...) + buf = append(buf, '\\', c) + i = j + 1 + j = j + 1 + continue + + case '\n': + buf = append(buf, s[i:j]...) 
+ buf = append(buf, '\\', 'n') + i = j + 1 + j = j + 1 + continue + + case '\r': + buf = append(buf, s[i:j]...) + buf = append(buf, '\\', 'r') + i = j + 1 + j = j + 1 + continue + + case '\t': + buf = append(buf, s[i:j]...) + buf = append(buf, '\\', 't') + i = j + 1 + j = j + 1 + continue + + case 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x0B, 0x0C, 0x0E, 0x0F, // 0x00-0x0F + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F: // 0x10-0x1F + buf = append(buf, s[i:j]...) + buf = append(buf, `\u00`...) + buf = append(buf, hex[c>>4], hex[c&0xF]) + i = j + 1 + j = j + 1 + continue + } + + state, size := decodeRuneInString(s[j:]) + switch state { + case runeErrorState: + buf = append(buf, s[i:j]...) + buf = append(buf, `\ufffd`...) + i = j + 1 + j = j + 1 + continue + // U+2028 is LINE SEPARATOR. + // U+2029 is PARAGRAPH SEPARATOR. + // They are both technically valid characters in JSON strings, + // but don't work in JSONP, which has to be evaluated as JavaScript, + // and can lead to security holes there. It is valid JSON to + // escape them, so we do so unconditionally. + // See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion. + case lineSepState: + buf = append(buf, s[i:j]...) + buf = append(buf, `\u2028`...) + i = j + 3 + j = j + 3 + continue + case paragraphSepState: + buf = append(buf, s[i:j]...) + buf = append(buf, `\u2029`...) + i = j + 3 + j = j + 3 + continue + } + j += size + } + + return append(append(buf, s[i:]...), '"') +} + +func appendString(buf []byte, s string) []byte { + valLen := len(s) + if valLen == 0 { + return append(buf, `""`...) + } + buf = append(buf, '"') + var ( + i, j int + ) + if valLen >= 8 { + chunks := stringToUint64Slice(s) + for _, n := range chunks { + // combine masks before checking for the MSB of each byte. We include + // `n` in the mask to check whether any of the *input* byte MSBs were + // set (i.e. the byte was outside the ASCII range). + mask := n | (n - (lsb * 0x20)) | + ((n ^ (lsb * '"')) - lsb) | + ((n ^ (lsb * '\\')) - lsb) + if (mask & msb) != 0 { + j = bits.TrailingZeros64(mask&msb) / 8 + goto ESCAPE_END + } + } + valLen := len(s) + for i := len(chunks) * 8; i < valLen; i++ { + if needEscape[s[i]] { + j = i + goto ESCAPE_END + } + } + return append(append(buf, s...), '"') + } +ESCAPE_END: + for j < valLen { + c := s[j] + + if !needEscape[c] { + // fast path: most of the time, printable ascii characters are used + j++ + continue + } + + switch c { + case '\\', '"': + buf = append(buf, s[i:j]...) + buf = append(buf, '\\', c) + i = j + 1 + j = j + 1 + continue + + case '\n': + buf = append(buf, s[i:j]...) + buf = append(buf, '\\', 'n') + i = j + 1 + j = j + 1 + continue + + case '\r': + buf = append(buf, s[i:j]...) + buf = append(buf, '\\', 'r') + i = j + 1 + j = j + 1 + continue + + case '\t': + buf = append(buf, s[i:j]...) + buf = append(buf, '\\', 't') + i = j + 1 + j = j + 1 + continue + + case 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x0B, 0x0C, 0x0E, 0x0F, // 0x00-0x0F + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F: // 0x10-0x1F + buf = append(buf, s[i:j]...) + buf = append(buf, `\u00`...) 
+ buf = append(buf, hex[c>>4], hex[c&0xF]) + i = j + 1 + j = j + 1 + continue + } + j++ + } + + return append(append(buf, s[i:]...), '"') +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/string_table.go b/vendor/github.com/goccy/go-json/internal/encoder/string_table.go new file mode 100644 index 0000000000000..ebe42c92dfd87 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/string_table.go @@ -0,0 +1,415 @@ +package encoder + +var needEscapeHTMLNormalizeUTF8 = [256]bool{ + '"': true, + '&': true, + '<': true, + '>': true, + '\\': true, + 0x00: true, + 0x01: true, + 0x02: true, + 0x03: true, + 0x04: true, + 0x05: true, + 0x06: true, + 0x07: true, + 0x08: true, + 0x09: true, + 0x0a: true, + 0x0b: true, + 0x0c: true, + 0x0d: true, + 0x0e: true, + 0x0f: true, + 0x10: true, + 0x11: true, + 0x12: true, + 0x13: true, + 0x14: true, + 0x15: true, + 0x16: true, + 0x17: true, + 0x18: true, + 0x19: true, + 0x1a: true, + 0x1b: true, + 0x1c: true, + 0x1d: true, + 0x1e: true, + 0x1f: true, + /* 0x20 - 0x7f */ + 0x80: true, + 0x81: true, + 0x82: true, + 0x83: true, + 0x84: true, + 0x85: true, + 0x86: true, + 0x87: true, + 0x88: true, + 0x89: true, + 0x8a: true, + 0x8b: true, + 0x8c: true, + 0x8d: true, + 0x8e: true, + 0x8f: true, + 0x90: true, + 0x91: true, + 0x92: true, + 0x93: true, + 0x94: true, + 0x95: true, + 0x96: true, + 0x97: true, + 0x98: true, + 0x99: true, + 0x9a: true, + 0x9b: true, + 0x9c: true, + 0x9d: true, + 0x9e: true, + 0x9f: true, + 0xa0: true, + 0xa1: true, + 0xa2: true, + 0xa3: true, + 0xa4: true, + 0xa5: true, + 0xa6: true, + 0xa7: true, + 0xa8: true, + 0xa9: true, + 0xaa: true, + 0xab: true, + 0xac: true, + 0xad: true, + 0xae: true, + 0xaf: true, + 0xb0: true, + 0xb1: true, + 0xb2: true, + 0xb3: true, + 0xb4: true, + 0xb5: true, + 0xb6: true, + 0xb7: true, + 0xb8: true, + 0xb9: true, + 0xba: true, + 0xbb: true, + 0xbc: true, + 0xbd: true, + 0xbe: true, + 0xbf: true, + 0xc0: true, + 0xc1: true, + 0xc2: true, + 0xc3: true, + 0xc4: true, + 0xc5: true, + 0xc6: true, + 0xc7: true, + 0xc8: true, + 0xc9: true, + 0xca: true, + 0xcb: true, + 0xcc: true, + 0xcd: true, + 0xce: true, + 0xcf: true, + 0xd0: true, + 0xd1: true, + 0xd2: true, + 0xd3: true, + 0xd4: true, + 0xd5: true, + 0xd6: true, + 0xd7: true, + 0xd8: true, + 0xd9: true, + 0xda: true, + 0xdb: true, + 0xdc: true, + 0xdd: true, + 0xde: true, + 0xdf: true, + 0xe0: true, + 0xe1: true, + 0xe2: true, + 0xe3: true, + 0xe4: true, + 0xe5: true, + 0xe6: true, + 0xe7: true, + 0xe8: true, + 0xe9: true, + 0xea: true, + 0xeb: true, + 0xec: true, + 0xed: true, + 0xee: true, + 0xef: true, + 0xf0: true, + 0xf1: true, + 0xf2: true, + 0xf3: true, + 0xf4: true, + 0xf5: true, + 0xf6: true, + 0xf7: true, + 0xf8: true, + 0xf9: true, + 0xfa: true, + 0xfb: true, + 0xfc: true, + 0xfd: true, + 0xfe: true, + 0xff: true, +} + +var needEscapeNormalizeUTF8 = [256]bool{ + '"': true, + '\\': true, + 0x00: true, + 0x01: true, + 0x02: true, + 0x03: true, + 0x04: true, + 0x05: true, + 0x06: true, + 0x07: true, + 0x08: true, + 0x09: true, + 0x0a: true, + 0x0b: true, + 0x0c: true, + 0x0d: true, + 0x0e: true, + 0x0f: true, + 0x10: true, + 0x11: true, + 0x12: true, + 0x13: true, + 0x14: true, + 0x15: true, + 0x16: true, + 0x17: true, + 0x18: true, + 0x19: true, + 0x1a: true, + 0x1b: true, + 0x1c: true, + 0x1d: true, + 0x1e: true, + 0x1f: true, + /* 0x20 - 0x7f */ + 0x80: true, + 0x81: true, + 0x82: true, + 0x83: true, + 0x84: true, + 0x85: true, + 0x86: true, + 0x87: true, + 0x88: true, + 0x89: true, + 0x8a: true, + 0x8b: true, + 0x8c: 
true, + 0x8d: true, + 0x8e: true, + 0x8f: true, + 0x90: true, + 0x91: true, + 0x92: true, + 0x93: true, + 0x94: true, + 0x95: true, + 0x96: true, + 0x97: true, + 0x98: true, + 0x99: true, + 0x9a: true, + 0x9b: true, + 0x9c: true, + 0x9d: true, + 0x9e: true, + 0x9f: true, + 0xa0: true, + 0xa1: true, + 0xa2: true, + 0xa3: true, + 0xa4: true, + 0xa5: true, + 0xa6: true, + 0xa7: true, + 0xa8: true, + 0xa9: true, + 0xaa: true, + 0xab: true, + 0xac: true, + 0xad: true, + 0xae: true, + 0xaf: true, + 0xb0: true, + 0xb1: true, + 0xb2: true, + 0xb3: true, + 0xb4: true, + 0xb5: true, + 0xb6: true, + 0xb7: true, + 0xb8: true, + 0xb9: true, + 0xba: true, + 0xbb: true, + 0xbc: true, + 0xbd: true, + 0xbe: true, + 0xbf: true, + 0xc0: true, + 0xc1: true, + 0xc2: true, + 0xc3: true, + 0xc4: true, + 0xc5: true, + 0xc6: true, + 0xc7: true, + 0xc8: true, + 0xc9: true, + 0xca: true, + 0xcb: true, + 0xcc: true, + 0xcd: true, + 0xce: true, + 0xcf: true, + 0xd0: true, + 0xd1: true, + 0xd2: true, + 0xd3: true, + 0xd4: true, + 0xd5: true, + 0xd6: true, + 0xd7: true, + 0xd8: true, + 0xd9: true, + 0xda: true, + 0xdb: true, + 0xdc: true, + 0xdd: true, + 0xde: true, + 0xdf: true, + 0xe0: true, + 0xe1: true, + 0xe2: true, + 0xe3: true, + 0xe4: true, + 0xe5: true, + 0xe6: true, + 0xe7: true, + 0xe8: true, + 0xe9: true, + 0xea: true, + 0xeb: true, + 0xec: true, + 0xed: true, + 0xee: true, + 0xef: true, + 0xf0: true, + 0xf1: true, + 0xf2: true, + 0xf3: true, + 0xf4: true, + 0xf5: true, + 0xf6: true, + 0xf7: true, + 0xf8: true, + 0xf9: true, + 0xfa: true, + 0xfb: true, + 0xfc: true, + 0xfd: true, + 0xfe: true, + 0xff: true, +} + +var needEscapeHTML = [256]bool{ + '"': true, + '&': true, + '<': true, + '>': true, + '\\': true, + 0x00: true, + 0x01: true, + 0x02: true, + 0x03: true, + 0x04: true, + 0x05: true, + 0x06: true, + 0x07: true, + 0x08: true, + 0x09: true, + 0x0a: true, + 0x0b: true, + 0x0c: true, + 0x0d: true, + 0x0e: true, + 0x0f: true, + 0x10: true, + 0x11: true, + 0x12: true, + 0x13: true, + 0x14: true, + 0x15: true, + 0x16: true, + 0x17: true, + 0x18: true, + 0x19: true, + 0x1a: true, + 0x1b: true, + 0x1c: true, + 0x1d: true, + 0x1e: true, + 0x1f: true, + /* 0x20 - 0xff */ +} + +var needEscape = [256]bool{ + '"': true, + '\\': true, + 0x00: true, + 0x01: true, + 0x02: true, + 0x03: true, + 0x04: true, + 0x05: true, + 0x06: true, + 0x07: true, + 0x08: true, + 0x09: true, + 0x0a: true, + 0x0b: true, + 0x0c: true, + 0x0d: true, + 0x0e: true, + 0x0f: true, + 0x10: true, + 0x11: true, + 0x12: true, + 0x13: true, + 0x14: true, + 0x15: true, + 0x16: true, + 0x17: true, + 0x18: true, + 0x19: true, + 0x1a: true, + 0x1b: true, + 0x1c: true, + 0x1d: true, + 0x1e: true, + 0x1f: true, + /* 0x20 - 0xff */ +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm/debug_vm.go b/vendor/github.com/goccy/go-json/internal/encoder/vm/debug_vm.go new file mode 100644 index 0000000000000..82b6dd47f864c --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/vm/debug_vm.go @@ -0,0 +1,41 @@ +package vm + +import ( + "fmt" + "io" + + "github.com/goccy/go-json/internal/encoder" +) + +func DebugRun(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet) ([]byte, error) { + defer func() { + var code *encoder.Opcode + if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 { + code = codeSet.EscapeKeyCode + } else { + code = codeSet.NoescapeKeyCode + } + if wc := ctx.Option.DebugDOTOut; wc != nil { + _, _ = io.WriteString(wc, code.DumpDOT()) + wc.Close() + ctx.Option.DebugDOTOut = nil + } + + if err := 
recover(); err != nil { + w := ctx.Option.DebugOut + fmt.Fprintln(w, "=============[DEBUG]===============") + fmt.Fprintln(w, "* [TYPE]") + fmt.Fprintln(w, codeSet.Type) + fmt.Fprintf(w, "\n") + fmt.Fprintln(w, "* [ALL OPCODE]") + fmt.Fprintln(w, code.Dump()) + fmt.Fprintf(w, "\n") + fmt.Fprintln(w, "* [CONTEXT]") + fmt.Fprintf(w, "%+v\n", ctx) + fmt.Fprintln(w, "===================================") + panic(err) + } + }() + + return Run(ctx, b, codeSet) +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm/hack.go b/vendor/github.com/goccy/go-json/internal/encoder/vm/hack.go new file mode 100644 index 0000000000000..65252b4a5cd76 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/vm/hack.go @@ -0,0 +1,9 @@ +package vm + +import ( + // HACK: compile order + // `vm`, `vm_indent`, `vm_color`, `vm_color_indent` packages uses a lot of memory to compile, + // so forcibly make dependencies and avoid compiling in concurrent. + // dependency order: vm => vm_indent => vm_color => vm_color_indent + _ "github.com/goccy/go-json/internal/encoder/vm_indent" +) diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm/util.go b/vendor/github.com/goccy/go-json/internal/encoder/vm/util.go new file mode 100644 index 0000000000000..86291d7bb3779 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/vm/util.go @@ -0,0 +1,207 @@ +package vm + +import ( + "encoding/json" + "fmt" + "unsafe" + + "github.com/goccy/go-json/internal/encoder" + "github.com/goccy/go-json/internal/runtime" +) + +const uintptrSize = 4 << (^uintptr(0) >> 63) + +var ( + appendInt = encoder.AppendInt + appendUint = encoder.AppendUint + appendFloat32 = encoder.AppendFloat32 + appendFloat64 = encoder.AppendFloat64 + appendString = encoder.AppendString + appendByteSlice = encoder.AppendByteSlice + appendNumber = encoder.AppendNumber + errUnsupportedValue = encoder.ErrUnsupportedValue + errUnsupportedFloat = encoder.ErrUnsupportedFloat + mapiterinit = encoder.MapIterInit + mapiterkey = encoder.MapIterKey + mapitervalue = encoder.MapIterValue + mapiternext = encoder.MapIterNext + maplen = encoder.MapLen +) + +type emptyInterface struct { + typ *runtime.Type + ptr unsafe.Pointer +} + +type nonEmptyInterface struct { + itab *struct { + ityp *runtime.Type // static interface type + typ *runtime.Type // dynamic concrete type + // unused fields... 
+ } + ptr unsafe.Pointer +} + +func errUnimplementedOp(op encoder.OpType) error { + return fmt.Errorf("encoder: opcode %s has not been implemented", op) +} + +func load(base uintptr, idx uint32) uintptr { + addr := base + uintptr(idx) + return **(**uintptr)(unsafe.Pointer(&addr)) +} + +func store(base uintptr, idx uint32, p uintptr) { + addr := base + uintptr(idx) + **(**uintptr)(unsafe.Pointer(&addr)) = p +} + +func loadNPtr(base uintptr, idx uint32, ptrNum uint8) uintptr { + addr := base + uintptr(idx) + p := **(**uintptr)(unsafe.Pointer(&addr)) + for i := uint8(0); i < ptrNum; i++ { + if p == 0 { + return 0 + } + p = ptrToPtr(p) + } + return p +} + +func ptrToUint64(p uintptr, bitSize uint8) uint64 { + switch bitSize { + case 8: + return (uint64)(**(**uint8)(unsafe.Pointer(&p))) + case 16: + return (uint64)(**(**uint16)(unsafe.Pointer(&p))) + case 32: + return (uint64)(**(**uint32)(unsafe.Pointer(&p))) + case 64: + return **(**uint64)(unsafe.Pointer(&p)) + } + return 0 +} +func ptrToFloat32(p uintptr) float32 { return **(**float32)(unsafe.Pointer(&p)) } +func ptrToFloat64(p uintptr) float64 { return **(**float64)(unsafe.Pointer(&p)) } +func ptrToBool(p uintptr) bool { return **(**bool)(unsafe.Pointer(&p)) } +func ptrToBytes(p uintptr) []byte { return **(**[]byte)(unsafe.Pointer(&p)) } +func ptrToNumber(p uintptr) json.Number { return **(**json.Number)(unsafe.Pointer(&p)) } +func ptrToString(p uintptr) string { return **(**string)(unsafe.Pointer(&p)) } +func ptrToSlice(p uintptr) *runtime.SliceHeader { return *(**runtime.SliceHeader)(unsafe.Pointer(&p)) } +func ptrToPtr(p uintptr) uintptr { + return uintptr(**(**unsafe.Pointer)(unsafe.Pointer(&p))) +} +func ptrToNPtr(p uintptr, ptrNum uint8) uintptr { + for i := uint8(0); i < ptrNum; i++ { + if p == 0 { + return 0 + } + p = ptrToPtr(p) + } + return p +} + +func ptrToUnsafePtr(p uintptr) unsafe.Pointer { + return *(*unsafe.Pointer)(unsafe.Pointer(&p)) +} +func ptrToInterface(code *encoder.Opcode, p uintptr) interface{} { + return *(*interface{})(unsafe.Pointer(&emptyInterface{ + typ: code.Type, + ptr: *(*unsafe.Pointer)(unsafe.Pointer(&p)), + })) +} + +func appendBool(_ *encoder.RuntimeContext, b []byte, v bool) []byte { + if v { + return append(b, "true"...) + } + return append(b, "false"...) +} + +func appendNull(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, "null"...) +} + +func appendComma(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, ',') +} + +func appendNullComma(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, "null,"...) +} + +func appendColon(_ *encoder.RuntimeContext, b []byte) []byte { + last := len(b) - 1 + b[last] = ':' + return b +} + +func appendMapKeyValue(_ *encoder.RuntimeContext, _ *encoder.Opcode, b, key, value []byte) []byte { + b = append(b, key...) + b[len(b)-1] = ':' + return append(b, value...) 
+} + +func appendMapEnd(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { + b[len(b)-1] = '}' + b = append(b, ',') + return b +} + +func appendMarshalJSON(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte, v interface{}) ([]byte, error) { + return encoder.AppendMarshalJSON(ctx, code, b, v) +} + +func appendMarshalText(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte, v interface{}) ([]byte, error) { + return encoder.AppendMarshalText(ctx, code, b, v) +} + +func appendArrayHead(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { + return append(b, '[') +} + +func appendArrayEnd(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { + last := len(b) - 1 + b[last] = ']' + return append(b, ',') +} + +func appendEmptyArray(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, '[', ']', ',') +} + +func appendEmptyObject(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, '{', '}', ',') +} + +func appendObjectEnd(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { + last := len(b) - 1 + b[last] = '}' + return append(b, ',') +} + +func appendStructHead(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, '{') +} + +func appendStructKey(_ *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + return append(b, code.Key...) +} + +func appendStructEnd(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { + return append(b, '}', ',') +} + +func appendStructEndSkipLast(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + last := len(b) - 1 + if b[last] == ',' { + b[last] = '}' + return appendComma(ctx, b) + } + return appendStructEnd(ctx, code, b) +} + +func restoreIndent(_ *encoder.RuntimeContext, _ *encoder.Opcode, _ uintptr) {} +func storeIndent(_ uintptr, _ *encoder.Opcode, _ uintptr) {} +func appendMapKeyIndent(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { return b } +func appendArrayElemIndent(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { return b } diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm/vm.go b/vendor/github.com/goccy/go-json/internal/encoder/vm/vm.go new file mode 100644 index 0000000000000..645d20f9fbe97 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/vm/vm.go @@ -0,0 +1,4859 @@ +// Code generated by internal/cmd/generator. DO NOT EDIT! 
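vm/util.go above is the encoder VM's pointer toolkit: ctx.Ptrs acts as a flat register file of uintptr slots, load and store address a slot by byte offset from a base pointer, and the ptrTo* helpers reinterpret a slot as a concrete Go value. A minimal standalone sketch of that register-file pattern follows; load and store are copied in spirit from the vendored file, regs stands in for ctx.Ptrs, and the uintptr arithmetic is shown only to explain the technique, not as a generally safe idiom.

package main

import (
	"fmt"
	"unsafe"
)

// load reads the uintptr slot at byte offset idx from base,
// and store writes it, just as the VM does with ctx.Ptrs.
func load(base uintptr, idx uint32) uintptr {
	addr := base + uintptr(idx)
	return **(**uintptr)(unsafe.Pointer(&addr))
}

func store(base uintptr, idx uint32, p uintptr) {
	addr := base + uintptr(idx)
	**(**uintptr)(unsafe.Pointer(&addr)) = p
}

func main() {
	// 8 on 64-bit platforms, 4 on 32-bit; same expression as in util.go.
	const uintptrSize = 4 << (^uintptr(0) >> 63)

	regs := make([]uintptr, 4)                // stands in for ctx.Ptrs
	base := uintptr(unsafe.Pointer(&regs[0])) // stands in for ctx.Ptr()

	store(base, 2*uintptrSize, 42)                  // write "register" 2
	fmt.Println(load(base, 2*uintptrSize), regs[2]) // 42 42
}

In the Run loop that follows, every opcode reads its operand with load(ctxptr, code.Idx) and hands results to the next opcode with store, which lets the hot loop shuttle raw pointers between instructions instead of interface values.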
+package vm + +import ( + "math" + "reflect" + "sort" + "unsafe" + + "github.com/goccy/go-json/internal/encoder" + "github.com/goccy/go-json/internal/runtime" +) + +func Run(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet) ([]byte, error) { + recursiveLevel := 0 + ptrOffset := uintptr(0) + ctxptr := ctx.Ptr() + var code *encoder.Opcode + if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 { + code = codeSet.EscapeKeyCode + } else { + code = codeSet.NoescapeKeyCode + } + + for { + switch code.Op { + default: + return nil, errUnimplementedOp(code.Op) + case encoder.OpPtr: + p := load(ctxptr, code.Idx) + code = code.Next + store(ctxptr, code.Idx, ptrToPtr(p)) + case encoder.OpIntPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpInt: + b = appendInt(ctx, b, load(ctxptr, code.Idx), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpUintPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpUint: + b = appendUint(ctx, b, load(ctxptr, code.Idx), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpIntString: + b = append(b, '"') + b = appendInt(ctx, b, load(ctxptr, code.Idx), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpUintString: + b = append(b, '"') + b = appendUint(ctx, b, load(ctxptr, code.Idx), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpFloat32Ptr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + b = appendComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpFloat32: + b = appendFloat32(ctx, b, ptrToFloat32(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpFloat64Ptr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpFloat64: + v := ptrToFloat64(load(ctxptr, code.Idx)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStringPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpString: + b = appendString(ctx, b, ptrToString(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpBoolPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpBool: + b = appendBool(ctx, b, ptrToBool(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpBytesPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpBytes: + b = appendByteSlice(ctx, b, ptrToBytes(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpNumberPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + 
store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpNumber: + bb, err := appendNumber(ctx, b, ptrToNumber(load(ctxptr, code.Idx))) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpInterfacePtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpInterface: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + if recursiveLevel > encoder.StartDetectingCyclesAfter { + for _, seen := range ctx.SeenPtr { + if p == seen { + return nil, errUnsupportedValue(code, p) + } + } + } + ctx.SeenPtr = append(ctx.SeenPtr, p) + var ( + typ *runtime.Type + ifacePtr unsafe.Pointer + ) + up := ptrToUnsafePtr(p) + if code.Flags&encoder.NonEmptyInterfaceFlags != 0 { + iface := (*nonEmptyInterface)(up) + ifacePtr = iface.ptr + if iface.itab != nil { + typ = iface.itab.typ + } + } else { + iface := (*emptyInterface)(up) + ifacePtr = iface.ptr + typ = iface.typ + } + if ifacePtr == nil { + isDirectedNil := typ != nil && typ.Kind() == reflect.Struct && !runtime.IfaceIndir(typ) + if !isDirectedNil { + b = appendNullComma(ctx, b) + code = code.Next + break + } + } + ctx.KeepRefs = append(ctx.KeepRefs, up) + ifaceCodeSet, err := encoder.CompileToGetCodeSet(ctx, uintptr(unsafe.Pointer(typ))) + if err != nil { + return nil, err + } + + totalLength := uintptr(code.Length) + 3 + nextTotalLength := uintptr(ifaceCodeSet.CodeLength) + 3 + + var c *encoder.Opcode + if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 { + c = ifaceCodeSet.InterfaceEscapeKeyCode + } else { + c = ifaceCodeSet.InterfaceNoescapeKeyCode + } + curlen := uintptr(len(ctx.Ptrs)) + offsetNum := ptrOffset / uintptrSize + oldOffset := ptrOffset + ptrOffset += totalLength * uintptrSize + oldBaseIndent := ctx.BaseIndent + ctx.BaseIndent += code.Indent + + newLen := offsetNum + totalLength + nextTotalLength + if curlen < newLen { + ctx.Ptrs = append(ctx.Ptrs, make([]uintptr, newLen-curlen)...) 
+ } + ctxptr = ctx.Ptr() + ptrOffset // assign new ctxptr + + end := ifaceCodeSet.EndCode + store(ctxptr, c.Idx, uintptr(ifacePtr)) + store(ctxptr, end.Idx, oldOffset) + store(ctxptr, end.ElemIdx, uintptr(unsafe.Pointer(code.Next))) + storeIndent(ctxptr, end, uintptr(oldBaseIndent)) + code = c + recursiveLevel++ + case encoder.OpInterfaceEnd: + recursiveLevel-- + + // restore ctxptr + offset := load(ctxptr, code.Idx) + restoreIndent(ctx, code, ctxptr) + ctx.SeenPtr = ctx.SeenPtr[:len(ctx.SeenPtr)-1] + + codePtr := load(ctxptr, code.ElemIdx) + code = (*encoder.Opcode)(ptrToUnsafePtr(codePtr)) + ctxptr = ctx.Ptr() + offset + ptrOffset = offset + case encoder.OpMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, ptrToPtr(p)) + fallthrough + case encoder.OpMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + if (code.Flags&encoder.IsNilableTypeFlags) != 0 && (code.Flags&encoder.IndirectFlags) != 0 { + p = ptrToPtr(p) + } + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, ptrToPtr(p)) + fallthrough + case encoder.OpMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 { + b = append(b, `""`...) + b = appendComma(ctx, b) + code = code.Next + break + } + if (code.Flags&encoder.IsNilableTypeFlags) != 0 && (code.Flags&encoder.IndirectFlags) != 0 { + p = ptrToPtr(p) + } + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpSlicePtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpSlice: + p := load(ctxptr, code.Idx) + slice := ptrToSlice(p) + if p == 0 || slice.Data == nil { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.ElemIdx, 0) + store(ctxptr, code.Length, uintptr(slice.Len)) + store(ctxptr, code.Idx, uintptr(slice.Data)) + if slice.Len > 0 { + b = appendArrayHead(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, uintptr(slice.Data)) + } else { + b = appendEmptyArray(ctx, b) + code = code.End.Next + } + case encoder.OpSliceElem: + idx := load(ctxptr, code.ElemIdx) + length := load(ctxptr, code.Length) + idx++ + if idx < length { + b = appendArrayElemIndent(ctx, code, b) + store(ctxptr, code.ElemIdx, idx) + data := load(ctxptr, code.Idx) + size := uintptr(code.Size) + code = code.Next + store(ctxptr, code.Idx, data+idx*size) + } else { + b = appendArrayEnd(ctx, code, b) + code = code.End.Next + } + case encoder.OpArrayPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpArray: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + if code.Length > 0 { + b = appendArrayHead(ctx, code, b) + store(ctxptr, code.ElemIdx, 0) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + b = appendEmptyArray(ctx, b) + code = code.End.Next + } + case encoder.OpArrayElem: + idx := load(ctxptr, 
code.ElemIdx) + idx++ + if idx < uintptr(code.Length) { + b = appendArrayElemIndent(ctx, code, b) + store(ctxptr, code.ElemIdx, idx) + p := load(ctxptr, code.Idx) + size := uintptr(code.Size) + code = code.Next + store(ctxptr, code.Idx, p+idx*size) + } else { + b = appendArrayEnd(ctx, code, b) + code = code.End.Next + } + case encoder.OpMapPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpMap: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + uptr := ptrToUnsafePtr(p) + mlen := maplen(uptr) + if mlen <= 0 { + b = appendEmptyObject(ctx, b) + code = code.End.Next + break + } + b = appendStructHead(ctx, b) + unorderedMap := (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 + mapCtx := encoder.NewMapContext(mlen, unorderedMap) + mapiterinit(code.Type, uptr, &mapCtx.Iter) + store(ctxptr, code.Idx, uintptr(unsafe.Pointer(mapCtx))) + ctx.KeepRefs = append(ctx.KeepRefs, unsafe.Pointer(mapCtx)) + if unorderedMap { + b = appendMapKeyIndent(ctx, code.Next, b) + } else { + mapCtx.Start = len(b) + mapCtx.First = len(b) + } + key := mapiterkey(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(key)) + code = code.Next + case encoder.OpMapKey: + mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx))) + idx := mapCtx.Idx + idx++ + if (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 { + if idx < mapCtx.Len { + b = appendMapKeyIndent(ctx, code, b) + mapCtx.Idx = int(idx) + key := mapiterkey(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(key)) + code = code.Next + } else { + b = appendObjectEnd(ctx, code, b) + encoder.ReleaseMapContext(mapCtx) + code = code.End.Next + } + } else { + mapCtx.Slice.Items[mapCtx.Idx].Value = b[mapCtx.Start:len(b)] + if idx < mapCtx.Len { + mapCtx.Idx = int(idx) + mapCtx.Start = len(b) + key := mapiterkey(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(key)) + code = code.Next + } else { + code = code.End + } + } + case encoder.OpMapValue: + mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx))) + if (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 { + b = appendColon(ctx, b) + } else { + mapCtx.Slice.Items[mapCtx.Idx].Key = b[mapCtx.Start:len(b)] + mapCtx.Start = len(b) + } + value := mapitervalue(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(value)) + mapiternext(&mapCtx.Iter) + code = code.Next + case encoder.OpMapEnd: + // this operation only used by sorted map. + mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx))) + sort.Sort(mapCtx.Slice) + buf := mapCtx.Buf + for _, item := range mapCtx.Slice.Items { + buf = appendMapKeyValue(ctx, code, buf, item.Key, item.Value) + } + buf = appendMapEnd(ctx, code, buf) + b = b[:mapCtx.First] + b = append(b, buf...) 
+ mapCtx.Buf = buf + encoder.ReleaseMapContext(mapCtx) + code = code.Next + case encoder.OpRecursivePtr: + p := load(ctxptr, code.Idx) + if p == 0 { + code = code.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpRecursive: + ptr := load(ctxptr, code.Idx) + if ptr != 0 { + if recursiveLevel > encoder.StartDetectingCyclesAfter { + for _, seen := range ctx.SeenPtr { + if ptr == seen { + return nil, errUnsupportedValue(code, ptr) + } + } + } + } + ctx.SeenPtr = append(ctx.SeenPtr, ptr) + c := code.Jmp.Code + curlen := uintptr(len(ctx.Ptrs)) + offsetNum := ptrOffset / uintptrSize + oldOffset := ptrOffset + ptrOffset += code.Jmp.CurLen * uintptrSize + oldBaseIndent := ctx.BaseIndent + indentDiffFromTop := c.Indent - 1 + ctx.BaseIndent += code.Indent - indentDiffFromTop + + newLen := offsetNum + code.Jmp.CurLen + code.Jmp.NextLen + if curlen < newLen { + ctx.Ptrs = append(ctx.Ptrs, make([]uintptr, newLen-curlen)...) + } + ctxptr = ctx.Ptr() + ptrOffset // assign new ctxptr + + store(ctxptr, c.Idx, ptr) + store(ctxptr, c.End.Next.Idx, oldOffset) + store(ctxptr, c.End.Next.ElemIdx, uintptr(unsafe.Pointer(code.Next))) + storeIndent(ctxptr, c.End.Next, uintptr(oldBaseIndent)) + code = c + recursiveLevel++ + case encoder.OpRecursiveEnd: + recursiveLevel-- + + // restore ctxptr + restoreIndent(ctx, code, ctxptr) + offset := load(ctxptr, code.Idx) + ctx.SeenPtr = ctx.SeenPtr[:len(ctx.SeenPtr)-1] + + codePtr := load(ctxptr, code.ElemIdx) + code = (*encoder.Opcode)(ptrToUnsafePtr(codePtr)) + ctxptr = ctx.Ptr() + offset + ptrOffset = offset + case encoder.OpStructPtrHead: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHead: + p := load(ctxptr, code.Idx) + if p == 0 && ((code.Flags&encoder.IndirectFlags) != 0 || code.Next.Op == encoder.OpStructEnd) { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if len(code.Key) > 0 { + if (code.Flags&encoder.IsTaggedKeyFlags) != 0 || code.Flags&encoder.AnonymousKeyFlags == 0 { + b = appendStructKey(ctx, code, b) + } + } + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructPtrHeadOmitEmpty: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmpty: + p := load(ctxptr, code.Idx) + if p == 0 && ((code.Flags&encoder.IndirectFlags) != 0 || code.Next.Op == encoder.OpStructEnd) { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + if p == 0 || (ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0) { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadInt: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } 
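+				// nil struct pointer: null was written above (skipped for an embedded head), so jump past this struct's fields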
+ code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadInt: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyInt: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyInt: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadIntString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadIntString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyIntString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyIntString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + u64 := ptrToUint64(p, code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { 
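+				// indirect head with a nil struct pointer: encode the whole struct as null unless it is an embedded field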
+ if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendInt(ctx, b, p, code) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p, code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadUint: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadUint: + p := load(ctxptr, code.Idx) + if p == 0 { + if 
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyUint: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyUint: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadUintString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadUintString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyUintString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyUintString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadUintPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadUintPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags 
== 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendUint(ctx, b, p, code) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyUintPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyUintPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p, code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadUintPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadUintPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyUintPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyUintPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadFloat32: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadFloat32: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if 
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat32: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyFloat32: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat32(p + uintptr(code.Offset)) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadFloat32String: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadFloat32String: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat32String: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyFloat32String: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat32(p + uintptr(code.Offset)) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadFloat32Ptr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadFloat32Ptr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + 
b = appendFloat32(ctx, b, ptrToFloat32(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat32Ptr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyFloat32Ptr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadFloat32PtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadFloat32PtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat32PtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyFloat32PtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadFloat64: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadFloat64: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, 
b) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat64: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyFloat64: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat64(p + uintptr(code.Offset)) + if v == 0 { + code = code.NextField + } else { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadFloat64String: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadFloat64String: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat64String: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyFloat64String: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat64(p + uintptr(code.Offset)) + if v == 0 { + code = code.NextField + } else { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadFloat64Ptr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadFloat64Ptr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if 
(code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat64Ptr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyFloat64Ptr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadFloat64PtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadFloat64PtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat64PtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyFloat64PtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, 
b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNull(ctx, b) + b = appendComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToString(p + uintptr(code.Offset)) + if v == "" { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadStringString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadStringString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p+uintptr(code.Offset))))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyStringString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyStringString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToString(p + uintptr(code.Offset)) + if v == "" { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, v))) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadStringPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadStringPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + 
code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, ptrToString(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyStringPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyStringPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadStringPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadStringPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyStringPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyStringPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadBool: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadBool: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags 
== 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBool: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyBool: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } else { + code = code.NextField + } + case encoder.OpStructPtrHeadBoolString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadBoolString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBoolString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyBoolString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } else { + code = code.NextField + } + case encoder.OpStructPtrHeadBoolPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadBoolPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = 
appendNull(ctx, b) + } else { + b = appendBool(ctx, b, ptrToBool(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBoolPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyBoolPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadBoolPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadBoolPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBoolPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyBoolPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadBytes: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadBytes: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code 
= code.Next + case encoder.OpStructPtrHeadOmitEmptyBytes: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyBytes: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToBytes(p + uintptr(code.Offset)) + if len(v) == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadBytesPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadBytesPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendByteSlice(ctx, b, ptrToBytes(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBytesPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyBytesPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadNumber: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadNumber: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyNumber: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if 
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyNumber: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToNumber(p + uintptr(code.Offset)) + if v == "" { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + } + case encoder.OpStructPtrHeadNumberString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadNumberString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyNumberString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyNumberString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToNumber(p + uintptr(code.Offset)) + if v == "" { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadNumberPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadNumberPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyNumberPtr: + p := load(ctxptr, code.Idx) + if p == 0 { 
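+				// nil outer struct pointer: emit null for the struct (unless embedded) and skip its fields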
+ if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyNumberPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructPtrHeadNumberPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadNumberPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyNumberPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyNumberPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadArray, encoder.OpStructPtrHeadSlice: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadArray, encoder.OpStructHeadSlice: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case 
encoder.OpStructPtrHeadOmitEmptyArray: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyArray: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructPtrHeadOmitEmptySlice: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptySlice: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + slice := ptrToSlice(p) + if slice.Len == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadArrayPtr, encoder.OpStructPtrHeadSlicePtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadArrayPtr, encoder.OpStructHeadSlicePtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNullComma(ctx, b) + code = code.NextField + } else { + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadOmitEmptyArrayPtr, encoder.OpStructPtrHeadOmitEmptySlicePtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyArrayPtr, encoder.OpStructHeadOmitEmptySlicePtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadMap: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = 
appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadMap: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if p != 0 && (code.Flags&encoder.IndirectFlags) != 0 { + p = ptrToPtr(p + uintptr(code.Offset)) + } + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructPtrHeadOmitEmptyMap: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyMap: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if p != 0 && (code.Flags&encoder.IndirectFlags) != 0 { + p = ptrToPtr(p + uintptr(code.Offset)) + } + if maplen(ptrToUnsafePtr(p)) == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadMapPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadMapPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.NextField + break + } + p = ptrToPtr(p + uintptr(code.Offset)) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.NextField + } else { + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p, code.PtrNum) + } + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadOmitEmptyMapPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyMapPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if p == 0 { + code = code.NextField + break + } + p = ptrToPtr(p + uintptr(code.Offset)) + if p == 0 { + code = code.NextField + } else { + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p, code.PtrNum) + } + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = 
appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadMarshalJSON { + p = ptrToPtr(p) + } + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadOmitEmptyMarshalJSON { + p = ptrToPtr(p) + } + } + iface := ptrToInterface(code, p) + if (code.Flags&encoder.NilCheckFlags) != 0 && encoder.IsNilForMarshaler(iface) { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalJSON(ctx, code, b, iface) + if err != nil { + return nil, err + } + b = bb + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyMarshalJSONPtr: + p 
:= load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if p == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadMarshalText { + p = ptrToPtr(p) + } + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadOmitEmptyMarshalText { + p = ptrToPtr(p) + } + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if 
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if p == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructField: + if code.Flags&encoder.IsTaggedKeyFlags != 0 || code.Flags&encoder.AnonymousKeyFlags == 0 { + b = appendStructKey(ctx, code, b) + } + p := load(ctxptr, code.Idx) + uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmpty: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + if ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructFieldInt: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyInt: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldIntString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyIntString: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldIntPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendInt(ctx, b, p, code) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyIntPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if 
p != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p, code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldIntPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldUint: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyUint: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldUintString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyUintString: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldUintPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendUint(ctx, b, p, code) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyUintPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p, code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldUintPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyUintPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat32: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat32: + p := load(ctxptr, code.Idx) + v := ptrToFloat32(p + uintptr(code.Offset)) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, v) + b = 
appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat32String: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat32String: + p := load(ctxptr, code.Idx) + v := ptrToFloat32(p + uintptr(code.Offset)) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat32Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendFloat32(ctx, b, ptrToFloat32(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat32Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat32PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat32PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat64: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat64: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if v != 0 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat64String: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat64String: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if v != 0 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat64Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, 
errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat64Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat64PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat64PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyString: + p := load(ctxptr, code.Idx) + v := ptrToString(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldStringString: + p := load(ctxptr, code.Idx) + s := ptrToString(p + uintptr(code.Offset)) + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, s))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyStringString: + p := load(ctxptr, code.Idx) + v := ptrToString(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, v))) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldStringPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, ptrToString(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyStringPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldStringPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyStringPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + b = appendComma(ctx, b) 
+ } + code = code.Next + case encoder.OpStructFieldBool: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBool: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBoolString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBoolString: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBoolPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendBool(ctx, b, ptrToBool(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBoolPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBoolPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBoolPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBytes: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBytes: + p := load(ctxptr, code.Idx) + v := ptrToBytes(p + uintptr(code.Offset)) + if len(v) > 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBytesPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendByteSlice(ctx, b, ptrToBytes(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBytesPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldNumber: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case 
encoder.OpStructFieldOmitEmptyNumber: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldNumberString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyNumberString: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldNumberPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyNumberPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldNumberPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyNumberPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldMarshalJSON: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalJSON: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + code = code.NextField + break + } + iface := ptrToInterface(code, p) + if (code.Flags&encoder.NilCheckFlags) != 0 && encoder.IsNilForMarshaler(iface) { + code = code.NextField + break + } + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalJSON(ctx, code, b, iface) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpStructFieldMarshalJSONPtr: + p := load(ctxptr, code.Idx) + b = 
appendStructKey(ctx, code, b) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalJSONPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldMarshalText: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalText: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + code = code.NextField + break + } + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpStructFieldMarshalTextPtr: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalTextPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldArray: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyArray: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldArrayPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyArrayPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + code = code.NextField + } + case encoder.OpStructFieldSlice: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptySlice: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + slice := ptrToSlice(p) + if slice.Len == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code 
= code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructFieldSlicePtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptySlicePtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + code = code.NextField + } + case encoder.OpStructFieldMap: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + uintptr(code.Offset)) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyMap: + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + uintptr(code.Offset)) + if p == 0 || maplen(ptrToUnsafePtr(p)) == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructFieldMapPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + uintptr(code.Offset)) + if p != 0 { + p = ptrToNPtr(p, code.PtrNum) + } + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyMapPtr: + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + uintptr(code.Offset)) + if p != 0 { + p = ptrToNPtr(p, code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + code = code.NextField + } + case encoder.OpStructFieldStruct: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyStruct: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + if ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructEnd: + b = appendStructEndSkipLast(ctx, code, b) + code = code.Next + case encoder.OpStructEndInt: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyInt: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndIntString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyIntString: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndIntPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p 
== 0 { + b = appendNull(ctx, b) + } else { + b = appendInt(ctx, b, p, code) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyIntPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p, code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndIntPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUint: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUint: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUintString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUintString: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUintPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendUint(ctx, b, p, code) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUintPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p, code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUintPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUintPtrString: + p := load(ctxptr, code.Idx) + p = 
ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32: + p := load(ctxptr, code.Idx) + v := ptrToFloat32(p + uintptr(code.Offset)) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32String: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32String: + p := load(ctxptr, code.Idx) + v := ptrToFloat32(p + uintptr(code.Offset)) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32Ptr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendFloat32(ctx, b, ptrToFloat32(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32PtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat64: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if v != 0 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = 
code.Next + case encoder.OpStructEndFloat64String: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64String: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if v != 0 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat64Ptr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + b = appendStructEnd(ctx, code, b) + code = code.Next + break + } + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat64PtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyString: + p := load(ctxptr, code.Idx) + v := ptrToString(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndStringString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + s := ptrToString(p + uintptr(code.Offset)) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, s))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyStringString: + p := load(ctxptr, code.Idx) + v := ptrToString(p + 
uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, v))) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndStringPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, ptrToString(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyStringPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndStringPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyStringPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBool: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBool: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBoolString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBoolString: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBoolPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendBool(ctx, b, ptrToBool(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBoolPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBoolPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 
0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBoolPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBytes: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBytes: + p := load(ctxptr, code.Idx) + v := ptrToBytes(p + uintptr(code.Offset)) + if len(v) > 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBytesPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendByteSlice(ctx, b, ptrToBytes(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBytesPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndNumber: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = appendStructEnd(ctx, code, bb) + code = code.Next + case encoder.OpStructEndOmitEmptyNumber: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = appendStructEnd(ctx, code, bb) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndNumberString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyNumberString: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndNumberPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyNumberPtr: + p := load(ctxptr, code.Idx) + p = 
ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				bb, err := appendNumber(ctx, b, ptrToNumber(p))
+				if err != nil {
+					return nil, err
+				}
+				b = appendStructEnd(ctx, code, bb)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpStructEndNumberPtrString:
+			b = appendStructKey(ctx, code, b)
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = append(b, '"')
+				bb, err := appendNumber(ctx, b, ptrToNumber(p))
+				if err != nil {
+					return nil, err
+				}
+				b = append(bb, '"')
+			}
+			b = appendStructEnd(ctx, code, b)
+			code = code.Next
+		case encoder.OpStructEndOmitEmptyNumberPtrString:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if p != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				bb, err := appendNumber(ctx, b, ptrToNumber(p))
+				if err != nil {
+					return nil, err
+				}
+				b = append(bb, '"')
+				b = appendStructEnd(ctx, code, b)
+			} else {
+				b = appendStructEndSkipLast(ctx, code, b)
+			}
+			code = code.Next
+		case encoder.OpEnd:
+			goto END
+		}
+	}
+END:
+	return b, nil
+}
diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm_color/debug_vm.go b/vendor/github.com/goccy/go-json/internal/encoder/vm_color/debug_vm.go
new file mode 100644
index 0000000000000..925f61ed8e690
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/encoder/vm_color/debug_vm.go
@@ -0,0 +1,35 @@
+package vm_color
+
+import (
+	"fmt"
+
+	"github.com/goccy/go-json/internal/encoder"
+)
+
+func DebugRun(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet) ([]byte, error) {
+	var code *encoder.Opcode
+	if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 {
+		code = codeSet.EscapeKeyCode
+	} else {
+		code = codeSet.NoescapeKeyCode
+	}
+
+	defer func() {
+		if err := recover(); err != nil {
+			w := ctx.Option.DebugOut
+			fmt.Fprintln(w, "=============[DEBUG]===============")
+			fmt.Fprintln(w, "* [TYPE]")
+			fmt.Fprintln(w, codeSet.Type)
+			fmt.Fprintf(w, "\n")
+			fmt.Fprintln(w, "* [ALL OPCODE]")
+			fmt.Fprintln(w, code.Dump())
+			fmt.Fprintf(w, "\n")
+			fmt.Fprintln(w, "* [CONTEXT]")
+			fmt.Fprintf(w, "%+v\n", ctx)
+			fmt.Fprintln(w, "===================================")
+			panic(err)
+		}
+	}()
+
+	return Run(ctx, b, codeSet)
+}
diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm_color/hack.go b/vendor/github.com/goccy/go-json/internal/encoder/vm_color/hack.go
new file mode 100644
index 0000000000000..12ec56c5bbd29
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/encoder/vm_color/hack.go
@@ -0,0 +1,9 @@
+package vm_color
+
+import (
+	// HACK: compile order
+	// `vm`, `vm_indent`, `vm_color`, `vm_color_indent` packages uses a lot of memory to compile,
+	// so forcibly make dependencies and avoid compiling in concurrent.
+	// dependency order: vm => vm_indent => vm_color => vm_color_indent
+	_ "github.com/goccy/go-json/internal/encoder/vm_color_indent"
+)
diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm_color/util.go b/vendor/github.com/goccy/go-json/internal/encoder/vm_color/util.go
new file mode 100644
index 0000000000000..33f29aee44815
--- /dev/null
+++ b/vendor/github.com/goccy/go-json/internal/encoder/vm_color/util.go
@@ -0,0 +1,274 @@
+package vm_color
+
+import (
+	"encoding/json"
+	"fmt"
+	"unsafe"
+
+	"github.com/goccy/go-json/internal/encoder"
+	"github.com/goccy/go-json/internal/runtime"
+)
+
+const uintptrSize = 4 << (^uintptr(0) >> 63)
+
+var (
+	errUnsupportedValue = encoder.ErrUnsupportedValue
+	errUnsupportedFloat = encoder.ErrUnsupportedFloat
+	mapiterinit = encoder.MapIterInit
+	mapiterkey = encoder.MapIterKey
+	mapitervalue = encoder.MapIterValue
+	mapiternext = encoder.MapIterNext
+	maplen = encoder.MapLen
+)
+
+type emptyInterface struct {
+	typ *runtime.Type
+	ptr unsafe.Pointer
+}
+
+type nonEmptyInterface struct {
+	itab *struct {
+		ityp *runtime.Type // static interface type
+		typ *runtime.Type // dynamic concrete type
+		// unused fields...
+	}
+	ptr unsafe.Pointer
+}
+
+func errUnimplementedOp(op encoder.OpType) error {
+	return fmt.Errorf("encoder: opcode %s has not been implemented", op)
+}
+
+func load(base uintptr, idx uint32) uintptr {
+	addr := base + uintptr(idx)
+	return **(**uintptr)(unsafe.Pointer(&addr))
+}
+
+func store(base uintptr, idx uint32, p uintptr) {
+	addr := base + uintptr(idx)
+	**(**uintptr)(unsafe.Pointer(&addr)) = p
+}
+
+func loadNPtr(base uintptr, idx uint32, ptrNum uint8) uintptr {
+	addr := base + uintptr(idx)
+	p := **(**uintptr)(unsafe.Pointer(&addr))
+	for i := uint8(0); i < ptrNum; i++ {
+		if p == 0 {
+			return 0
+		}
+		p = ptrToPtr(p)
+	}
+	return p
+}
+
+func ptrToUint64(p uintptr, bitSize uint8) uint64 {
+	switch bitSize {
+	case 8:
+		return (uint64)(**(**uint8)(unsafe.Pointer(&p)))
+	case 16:
+		return (uint64)(**(**uint16)(unsafe.Pointer(&p)))
+	case 32:
+		return (uint64)(**(**uint32)(unsafe.Pointer(&p)))
+	case 64:
+		return **(**uint64)(unsafe.Pointer(&p))
+	}
+	return 0
+}
+func ptrToFloat32(p uintptr) float32 { return **(**float32)(unsafe.Pointer(&p)) }
+func ptrToFloat64(p uintptr) float64 { return **(**float64)(unsafe.Pointer(&p)) }
+func ptrToBool(p uintptr) bool { return **(**bool)(unsafe.Pointer(&p)) }
+func ptrToBytes(p uintptr) []byte { return **(**[]byte)(unsafe.Pointer(&p)) }
+func ptrToNumber(p uintptr) json.Number { return **(**json.Number)(unsafe.Pointer(&p)) }
+func ptrToString(p uintptr) string { return **(**string)(unsafe.Pointer(&p)) }
+func ptrToSlice(p uintptr) *runtime.SliceHeader { return *(**runtime.SliceHeader)(unsafe.Pointer(&p)) }
+func ptrToPtr(p uintptr) uintptr {
+	return uintptr(**(**unsafe.Pointer)(unsafe.Pointer(&p)))
+}
+func ptrToNPtr(p uintptr, ptrNum uint8) uintptr {
+	for i := uint8(0); i < ptrNum; i++ {
+		if p == 0 {
+			return 0
+		}
+		p = ptrToPtr(p)
+	}
+	return p
+}
+
+func ptrToUnsafePtr(p uintptr) unsafe.Pointer {
+	return *(*unsafe.Pointer)(unsafe.Pointer(&p))
+}
+func ptrToInterface(code *encoder.Opcode, p uintptr) interface{} {
+	return *(*interface{})(unsafe.Pointer(&emptyInterface{
+		typ: code.Type,
+		ptr: *(*unsafe.Pointer)(unsafe.Pointer(&p)),
+	}))
+}
+
+func appendInt(ctx *encoder.RuntimeContext, b []byte, p uintptr, code *encoder.Opcode) []byte {
+	format := ctx.Option.ColorScheme.Int
+	b = append(b, format.Header...)
+	b = encoder.AppendInt(ctx, b, p, code)
+	return append(b, format.Footer...)
+}
+
+func appendUint(ctx *encoder.RuntimeContext, b []byte, p uintptr, code *encoder.Opcode) []byte {
+	format := ctx.Option.ColorScheme.Uint
+	b = append(b, format.Header...)
+	b = encoder.AppendUint(ctx, b, p, code)
+	return append(b, format.Footer...)
+}
+
+func appendFloat32(ctx *encoder.RuntimeContext, b []byte, v float32) []byte {
+	format := ctx.Option.ColorScheme.Float
+	b = append(b, format.Header...)
+	b = encoder.AppendFloat32(ctx, b, v)
+	return append(b, format.Footer...)
+}
+
+func appendFloat64(ctx *encoder.RuntimeContext, b []byte, v float64) []byte {
+	format := ctx.Option.ColorScheme.Float
+	b = append(b, format.Header...)
+	b = encoder.AppendFloat64(ctx, b, v)
+	return append(b, format.Footer...)
+}
+
+func appendString(ctx *encoder.RuntimeContext, b []byte, v string) []byte {
+	format := ctx.Option.ColorScheme.String
+	b = append(b, format.Header...)
+	b = encoder.AppendString(ctx, b, v)
+	return append(b, format.Footer...)
+}
+
+func appendByteSlice(ctx *encoder.RuntimeContext, b []byte, src []byte) []byte {
+	format := ctx.Option.ColorScheme.Binary
+	b = append(b, format.Header...)
+	b = encoder.AppendByteSlice(ctx, b, src)
+	return append(b, format.Footer...)
+}
+
+func appendNumber(ctx *encoder.RuntimeContext, b []byte, n json.Number) ([]byte, error) {
+	format := ctx.Option.ColorScheme.Int
+	b = append(b, format.Header...)
+	bb, err := encoder.AppendNumber(ctx, b, n)
+	if err != nil {
+		return nil, err
+	}
+	return append(bb, format.Footer...), nil
+}
+
+func appendBool(ctx *encoder.RuntimeContext, b []byte, v bool) []byte {
+	format := ctx.Option.ColorScheme.Bool
+	b = append(b, format.Header...)
+	if v {
+		b = append(b, "true"...)
+	} else {
+		b = append(b, "false"...)
+	}
+	return append(b, format.Footer...)
+}
+
+func appendNull(ctx *encoder.RuntimeContext, b []byte) []byte {
+	format := ctx.Option.ColorScheme.Null
+	b = append(b, format.Header...)
+	b = append(b, "null"...)
+	return append(b, format.Footer...)
+}
+
+func appendComma(_ *encoder.RuntimeContext, b []byte) []byte {
+	return append(b, ',')
+}
+
+func appendNullComma(ctx *encoder.RuntimeContext, b []byte) []byte {
+	format := ctx.Option.ColorScheme.Null
+	b = append(b, format.Header...)
+	b = append(b, "null"...)
+	return append(append(b, format.Footer...), ',')
+}
+
+func appendColon(_ *encoder.RuntimeContext, b []byte) []byte {
+	last := len(b) - 1
+	b[last] = ':'
+	return b
+}
+
+func appendMapKeyValue(_ *encoder.RuntimeContext, _ *encoder.Opcode, b, key, value []byte) []byte {
+	b = append(b, key[:len(key)-1]...)
+	b = append(b, ':')
+	return append(b, value...)
+}
+
+func appendMapEnd(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte {
+	last := len(b) - 1
+	b[last] = '}'
+	b = append(b, ',')
+	return b
+}
+
+func appendMarshalJSON(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte, v interface{}) ([]byte, error) {
+	return encoder.AppendMarshalJSON(ctx, code, b, v)
+}
+
+func appendMarshalText(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte, v interface{}) ([]byte, error) {
+	format := ctx.Option.ColorScheme.String
+	b = append(b, format.Header...)
+ bb, err := encoder.AppendMarshalText(ctx, code, b, v) + if err != nil { + return nil, err + } + return append(bb, format.Footer...), nil +} + +func appendArrayHead(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { + return append(b, '[') +} + +func appendArrayEnd(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { + last := len(b) - 1 + b[last] = ']' + return append(b, ',') +} + +func appendEmptyArray(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, '[', ']', ',') +} + +func appendEmptyObject(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, '{', '}', ',') +} + +func appendObjectEnd(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { + last := len(b) - 1 + b[last] = '}' + return append(b, ',') +} + +func appendStructHead(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, '{') +} + +func appendStructKey(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + format := ctx.Option.ColorScheme.ObjectKey + b = append(b, format.Header...) + b = append(b, code.Key[:len(code.Key)-1]...) + b = append(b, format.Footer...) + + return append(b, ':') +} + +func appendStructEnd(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { + return append(b, '}', ',') +} + +func appendStructEndSkipLast(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + last := len(b) - 1 + if b[last] == ',' { + b[last] = '}' + return appendComma(ctx, b) + } + return appendStructEnd(ctx, code, b) +} + +func restoreIndent(_ *encoder.RuntimeContext, _ *encoder.Opcode, _ uintptr) {} +func storeIndent(_ uintptr, _ *encoder.Opcode, _ uintptr) {} +func appendMapKeyIndent(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { return b } +func appendArrayElemIndent(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { return b } diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm_color/vm.go b/vendor/github.com/goccy/go-json/internal/encoder/vm_color/vm.go new file mode 100644 index 0000000000000..a63e83e5505ac --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/vm_color/vm.go @@ -0,0 +1,4859 @@ +// Code generated by internal/cmd/generator. DO NOT EDIT! 
+package vm_color + +import ( + "math" + "reflect" + "sort" + "unsafe" + + "github.com/goccy/go-json/internal/encoder" + "github.com/goccy/go-json/internal/runtime" +) + +func Run(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet) ([]byte, error) { + recursiveLevel := 0 + ptrOffset := uintptr(0) + ctxptr := ctx.Ptr() + var code *encoder.Opcode + if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 { + code = codeSet.EscapeKeyCode + } else { + code = codeSet.NoescapeKeyCode + } + + for { + switch code.Op { + default: + return nil, errUnimplementedOp(code.Op) + case encoder.OpPtr: + p := load(ctxptr, code.Idx) + code = code.Next + store(ctxptr, code.Idx, ptrToPtr(p)) + case encoder.OpIntPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpInt: + b = appendInt(ctx, b, load(ctxptr, code.Idx), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpUintPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpUint: + b = appendUint(ctx, b, load(ctxptr, code.Idx), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpIntString: + b = append(b, '"') + b = appendInt(ctx, b, load(ctxptr, code.Idx), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpUintString: + b = append(b, '"') + b = appendUint(ctx, b, load(ctxptr, code.Idx), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpFloat32Ptr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + b = appendComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpFloat32: + b = appendFloat32(ctx, b, ptrToFloat32(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpFloat64Ptr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpFloat64: + v := ptrToFloat64(load(ctxptr, code.Idx)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStringPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpString: + b = appendString(ctx, b, ptrToString(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpBoolPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpBool: + b = appendBool(ctx, b, ptrToBool(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpBytesPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpBytes: + b = appendByteSlice(ctx, b, ptrToBytes(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpNumberPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + 
store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpNumber: + bb, err := appendNumber(ctx, b, ptrToNumber(load(ctxptr, code.Idx))) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpInterfacePtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpInterface: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + if recursiveLevel > encoder.StartDetectingCyclesAfter { + for _, seen := range ctx.SeenPtr { + if p == seen { + return nil, errUnsupportedValue(code, p) + } + } + } + ctx.SeenPtr = append(ctx.SeenPtr, p) + var ( + typ *runtime.Type + ifacePtr unsafe.Pointer + ) + up := ptrToUnsafePtr(p) + if code.Flags&encoder.NonEmptyInterfaceFlags != 0 { + iface := (*nonEmptyInterface)(up) + ifacePtr = iface.ptr + if iface.itab != nil { + typ = iface.itab.typ + } + } else { + iface := (*emptyInterface)(up) + ifacePtr = iface.ptr + typ = iface.typ + } + if ifacePtr == nil { + isDirectedNil := typ != nil && typ.Kind() == reflect.Struct && !runtime.IfaceIndir(typ) + if !isDirectedNil { + b = appendNullComma(ctx, b) + code = code.Next + break + } + } + ctx.KeepRefs = append(ctx.KeepRefs, up) + ifaceCodeSet, err := encoder.CompileToGetCodeSet(ctx, uintptr(unsafe.Pointer(typ))) + if err != nil { + return nil, err + } + + totalLength := uintptr(code.Length) + 3 + nextTotalLength := uintptr(ifaceCodeSet.CodeLength) + 3 + + var c *encoder.Opcode + if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 { + c = ifaceCodeSet.InterfaceEscapeKeyCode + } else { + c = ifaceCodeSet.InterfaceNoescapeKeyCode + } + curlen := uintptr(len(ctx.Ptrs)) + offsetNum := ptrOffset / uintptrSize + oldOffset := ptrOffset + ptrOffset += totalLength * uintptrSize + oldBaseIndent := ctx.BaseIndent + ctx.BaseIndent += code.Indent + + newLen := offsetNum + totalLength + nextTotalLength + if curlen < newLen { + ctx.Ptrs = append(ctx.Ptrs, make([]uintptr, newLen-curlen)...) 
+ } + ctxptr = ctx.Ptr() + ptrOffset // assign new ctxptr + + end := ifaceCodeSet.EndCode + store(ctxptr, c.Idx, uintptr(ifacePtr)) + store(ctxptr, end.Idx, oldOffset) + store(ctxptr, end.ElemIdx, uintptr(unsafe.Pointer(code.Next))) + storeIndent(ctxptr, end, uintptr(oldBaseIndent)) + code = c + recursiveLevel++ + case encoder.OpInterfaceEnd: + recursiveLevel-- + + // restore ctxptr + offset := load(ctxptr, code.Idx) + restoreIndent(ctx, code, ctxptr) + ctx.SeenPtr = ctx.SeenPtr[:len(ctx.SeenPtr)-1] + + codePtr := load(ctxptr, code.ElemIdx) + code = (*encoder.Opcode)(ptrToUnsafePtr(codePtr)) + ctxptr = ctx.Ptr() + offset + ptrOffset = offset + case encoder.OpMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, ptrToPtr(p)) + fallthrough + case encoder.OpMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + if (code.Flags&encoder.IsNilableTypeFlags) != 0 && (code.Flags&encoder.IndirectFlags) != 0 { + p = ptrToPtr(p) + } + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, ptrToPtr(p)) + fallthrough + case encoder.OpMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 { + b = append(b, `""`...) + b = appendComma(ctx, b) + code = code.Next + break + } + if (code.Flags&encoder.IsNilableTypeFlags) != 0 && (code.Flags&encoder.IndirectFlags) != 0 { + p = ptrToPtr(p) + } + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpSlicePtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpSlice: + p := load(ctxptr, code.Idx) + slice := ptrToSlice(p) + if p == 0 || slice.Data == nil { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.ElemIdx, 0) + store(ctxptr, code.Length, uintptr(slice.Len)) + store(ctxptr, code.Idx, uintptr(slice.Data)) + if slice.Len > 0 { + b = appendArrayHead(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, uintptr(slice.Data)) + } else { + b = appendEmptyArray(ctx, b) + code = code.End.Next + } + case encoder.OpSliceElem: + idx := load(ctxptr, code.ElemIdx) + length := load(ctxptr, code.Length) + idx++ + if idx < length { + b = appendArrayElemIndent(ctx, code, b) + store(ctxptr, code.ElemIdx, idx) + data := load(ctxptr, code.Idx) + size := uintptr(code.Size) + code = code.Next + store(ctxptr, code.Idx, data+idx*size) + } else { + b = appendArrayEnd(ctx, code, b) + code = code.End.Next + } + case encoder.OpArrayPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpArray: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + if code.Length > 0 { + b = appendArrayHead(ctx, code, b) + store(ctxptr, code.ElemIdx, 0) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + b = appendEmptyArray(ctx, b) + code = code.End.Next + } + case encoder.OpArrayElem: + idx := load(ctxptr, 
code.ElemIdx) + idx++ + if idx < uintptr(code.Length) { + b = appendArrayElemIndent(ctx, code, b) + store(ctxptr, code.ElemIdx, idx) + p := load(ctxptr, code.Idx) + size := uintptr(code.Size) + code = code.Next + store(ctxptr, code.Idx, p+idx*size) + } else { + b = appendArrayEnd(ctx, code, b) + code = code.End.Next + } + case encoder.OpMapPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpMap: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + uptr := ptrToUnsafePtr(p) + mlen := maplen(uptr) + if mlen <= 0 { + b = appendEmptyObject(ctx, b) + code = code.End.Next + break + } + b = appendStructHead(ctx, b) + unorderedMap := (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 + mapCtx := encoder.NewMapContext(mlen, unorderedMap) + mapiterinit(code.Type, uptr, &mapCtx.Iter) + store(ctxptr, code.Idx, uintptr(unsafe.Pointer(mapCtx))) + ctx.KeepRefs = append(ctx.KeepRefs, unsafe.Pointer(mapCtx)) + if unorderedMap { + b = appendMapKeyIndent(ctx, code.Next, b) + } else { + mapCtx.Start = len(b) + mapCtx.First = len(b) + } + key := mapiterkey(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(key)) + code = code.Next + case encoder.OpMapKey: + mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx))) + idx := mapCtx.Idx + idx++ + if (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 { + if idx < mapCtx.Len { + b = appendMapKeyIndent(ctx, code, b) + mapCtx.Idx = int(idx) + key := mapiterkey(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(key)) + code = code.Next + } else { + b = appendObjectEnd(ctx, code, b) + encoder.ReleaseMapContext(mapCtx) + code = code.End.Next + } + } else { + mapCtx.Slice.Items[mapCtx.Idx].Value = b[mapCtx.Start:len(b)] + if idx < mapCtx.Len { + mapCtx.Idx = int(idx) + mapCtx.Start = len(b) + key := mapiterkey(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(key)) + code = code.Next + } else { + code = code.End + } + } + case encoder.OpMapValue: + mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx))) + if (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 { + b = appendColon(ctx, b) + } else { + mapCtx.Slice.Items[mapCtx.Idx].Key = b[mapCtx.Start:len(b)] + mapCtx.Start = len(b) + } + value := mapitervalue(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(value)) + mapiternext(&mapCtx.Iter) + code = code.Next + case encoder.OpMapEnd: + // this operation only used by sorted map. + mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx))) + sort.Sort(mapCtx.Slice) + buf := mapCtx.Buf + for _, item := range mapCtx.Slice.Items { + buf = appendMapKeyValue(ctx, code, buf, item.Key, item.Value) + } + buf = appendMapEnd(ctx, code, buf) + b = b[:mapCtx.First] + b = append(b, buf...) 
+ mapCtx.Buf = buf + encoder.ReleaseMapContext(mapCtx) + code = code.Next + case encoder.OpRecursivePtr: + p := load(ctxptr, code.Idx) + if p == 0 { + code = code.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpRecursive: + ptr := load(ctxptr, code.Idx) + if ptr != 0 { + if recursiveLevel > encoder.StartDetectingCyclesAfter { + for _, seen := range ctx.SeenPtr { + if ptr == seen { + return nil, errUnsupportedValue(code, ptr) + } + } + } + } + ctx.SeenPtr = append(ctx.SeenPtr, ptr) + c := code.Jmp.Code + curlen := uintptr(len(ctx.Ptrs)) + offsetNum := ptrOffset / uintptrSize + oldOffset := ptrOffset + ptrOffset += code.Jmp.CurLen * uintptrSize + oldBaseIndent := ctx.BaseIndent + indentDiffFromTop := c.Indent - 1 + ctx.BaseIndent += code.Indent - indentDiffFromTop + + newLen := offsetNum + code.Jmp.CurLen + code.Jmp.NextLen + if curlen < newLen { + ctx.Ptrs = append(ctx.Ptrs, make([]uintptr, newLen-curlen)...) + } + ctxptr = ctx.Ptr() + ptrOffset // assign new ctxptr + + store(ctxptr, c.Idx, ptr) + store(ctxptr, c.End.Next.Idx, oldOffset) + store(ctxptr, c.End.Next.ElemIdx, uintptr(unsafe.Pointer(code.Next))) + storeIndent(ctxptr, c.End.Next, uintptr(oldBaseIndent)) + code = c + recursiveLevel++ + case encoder.OpRecursiveEnd: + recursiveLevel-- + + // restore ctxptr + restoreIndent(ctx, code, ctxptr) + offset := load(ctxptr, code.Idx) + ctx.SeenPtr = ctx.SeenPtr[:len(ctx.SeenPtr)-1] + + codePtr := load(ctxptr, code.ElemIdx) + code = (*encoder.Opcode)(ptrToUnsafePtr(codePtr)) + ctxptr = ctx.Ptr() + offset + ptrOffset = offset + case encoder.OpStructPtrHead: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHead: + p := load(ctxptr, code.Idx) + if p == 0 && ((code.Flags&encoder.IndirectFlags) != 0 || code.Next.Op == encoder.OpStructEnd) { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if len(code.Key) > 0 { + if (code.Flags&encoder.IsTaggedKeyFlags) != 0 || code.Flags&encoder.AnonymousKeyFlags == 0 { + b = appendStructKey(ctx, code, b) + } + } + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructPtrHeadOmitEmpty: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmpty: + p := load(ctxptr, code.Idx) + if p == 0 && ((code.Flags&encoder.IndirectFlags) != 0 || code.Next.Op == encoder.OpStructEnd) { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + if p == 0 || (ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0) { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadInt: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } 
+ code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadInt: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyInt: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyInt: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadIntString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadIntString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyIntString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyIntString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + u64 := ptrToUint64(p, code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { 
+ if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendInt(ctx, b, p, code) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p, code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadUint: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadUint: + p := load(ctxptr, code.Idx) + if p == 0 { + if 
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyUint: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyUint: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadUintString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadUintString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyUintString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyUintString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadUintPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadUintPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags 
== 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendUint(ctx, b, p, code) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyUintPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyUintPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p, code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadUintPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadUintPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyUintPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyUintPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadFloat32: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadFloat32: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if 
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat32: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyFloat32: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat32(p + uintptr(code.Offset)) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadFloat32String: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadFloat32String: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat32String: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyFloat32String: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat32(p + uintptr(code.Offset)) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadFloat32Ptr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadFloat32Ptr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + 
b = appendFloat32(ctx, b, ptrToFloat32(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat32Ptr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyFloat32Ptr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadFloat32PtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadFloat32PtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat32PtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyFloat32PtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadFloat64: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadFloat64: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, 
b) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat64: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyFloat64: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat64(p + uintptr(code.Offset)) + if v == 0 { + code = code.NextField + } else { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadFloat64String: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadFloat64String: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat64String: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyFloat64String: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat64(p + uintptr(code.Offset)) + if v == 0 { + code = code.NextField + } else { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadFloat64Ptr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadFloat64Ptr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if 
(code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat64Ptr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyFloat64Ptr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadFloat64PtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadFloat64PtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat64PtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyFloat64PtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, 
b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNull(ctx, b) + b = appendComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToString(p + uintptr(code.Offset)) + if v == "" { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadStringString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadStringString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p+uintptr(code.Offset))))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyStringString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyStringString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToString(p + uintptr(code.Offset)) + if v == "" { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, v))) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadStringPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadStringPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + 
code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, ptrToString(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyStringPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyStringPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadStringPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadStringPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyStringPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyStringPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadBool: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadBool: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags 
== 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBool: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyBool: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } else { + code = code.NextField + } + case encoder.OpStructPtrHeadBoolString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadBoolString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBoolString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyBoolString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } else { + code = code.NextField + } + case encoder.OpStructPtrHeadBoolPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadBoolPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = 
appendNull(ctx, b) + } else { + b = appendBool(ctx, b, ptrToBool(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBoolPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyBoolPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadBoolPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadBoolPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBoolPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyBoolPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadBytes: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadBytes: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code 
= code.Next + case encoder.OpStructPtrHeadOmitEmptyBytes: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyBytes: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToBytes(p + uintptr(code.Offset)) + if len(v) == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadBytesPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadBytesPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendByteSlice(ctx, b, ptrToBytes(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBytesPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyBytesPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadNumber: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadNumber: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyNumber: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if 
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyNumber: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToNumber(p + uintptr(code.Offset)) + if v == "" { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + } + case encoder.OpStructPtrHeadNumberString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadNumberString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyNumberString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyNumberString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToNumber(p + uintptr(code.Offset)) + if v == "" { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadNumberPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadNumberPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyNumberPtr: + p := load(ctxptr, code.Idx) + if p == 0 { 
+ if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyNumberPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructPtrHeadNumberPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadNumberPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyNumberPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyNumberPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadArray, encoder.OpStructPtrHeadSlice: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadArray, encoder.OpStructHeadSlice: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case 
encoder.OpStructPtrHeadOmitEmptyArray: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyArray: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructPtrHeadOmitEmptySlice: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptySlice: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + slice := ptrToSlice(p) + if slice.Len == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadArrayPtr, encoder.OpStructPtrHeadSlicePtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadArrayPtr, encoder.OpStructHeadSlicePtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNullComma(ctx, b) + code = code.NextField + } else { + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadOmitEmptyArrayPtr, encoder.OpStructPtrHeadOmitEmptySlicePtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyArrayPtr, encoder.OpStructHeadOmitEmptySlicePtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadMap: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = 
appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadMap: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if p != 0 && (code.Flags&encoder.IndirectFlags) != 0 { + p = ptrToPtr(p + uintptr(code.Offset)) + } + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructPtrHeadOmitEmptyMap: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyMap: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if p != 0 && (code.Flags&encoder.IndirectFlags) != 0 { + p = ptrToPtr(p + uintptr(code.Offset)) + } + if maplen(ptrToUnsafePtr(p)) == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadMapPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadMapPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.NextField + break + } + p = ptrToPtr(p + uintptr(code.Offset)) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.NextField + } else { + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p, code.PtrNum) + } + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadOmitEmptyMapPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyMapPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if p == 0 { + code = code.NextField + break + } + p = ptrToPtr(p + uintptr(code.Offset)) + if p == 0 { + code = code.NextField + } else { + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p, code.PtrNum) + } + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = 
appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadMarshalJSON { + p = ptrToPtr(p) + } + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadOmitEmptyMarshalJSON { + p = ptrToPtr(p) + } + } + iface := ptrToInterface(code, p) + if (code.Flags&encoder.NilCheckFlags) != 0 && encoder.IsNilForMarshaler(iface) { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalJSON(ctx, code, b, iface) + if err != nil { + return nil, err + } + b = bb + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyMarshalJSONPtr: + p 
:= load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if p == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadMarshalText { + p = ptrToPtr(p) + } + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadOmitEmptyMarshalText { + p = ptrToPtr(p) + } + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if 
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if p == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructField: + if code.Flags&encoder.IsTaggedKeyFlags != 0 || code.Flags&encoder.AnonymousKeyFlags == 0 { + b = appendStructKey(ctx, code, b) + } + p := load(ctxptr, code.Idx) + uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmpty: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + if ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructFieldInt: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyInt: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldIntString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyIntString: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldIntPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendInt(ctx, b, p, code) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyIntPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if 
p != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p, code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldIntPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldUint: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyUint: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldUintString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyUintString: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldUintPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendUint(ctx, b, p, code) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyUintPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p, code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldUintPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyUintPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat32: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat32: + p := load(ctxptr, code.Idx) + v := ptrToFloat32(p + uintptr(code.Offset)) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, v) + b = 
appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat32String: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat32String: + p := load(ctxptr, code.Idx) + v := ptrToFloat32(p + uintptr(code.Offset)) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat32Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendFloat32(ctx, b, ptrToFloat32(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat32Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat32PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat32PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat64: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat64: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if v != 0 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat64String: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat64String: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if v != 0 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat64Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, 
errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat64Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat64PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat64PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyString: + p := load(ctxptr, code.Idx) + v := ptrToString(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldStringString: + p := load(ctxptr, code.Idx) + s := ptrToString(p + uintptr(code.Offset)) + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, s))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyStringString: + p := load(ctxptr, code.Idx) + v := ptrToString(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, v))) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldStringPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, ptrToString(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyStringPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldStringPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyStringPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + b = appendComma(ctx, b) 
+ } + code = code.Next + case encoder.OpStructFieldBool: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBool: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBoolString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBoolString: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBoolPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendBool(ctx, b, ptrToBool(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBoolPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBoolPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBoolPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBytes: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBytes: + p := load(ctxptr, code.Idx) + v := ptrToBytes(p + uintptr(code.Offset)) + if len(v) > 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBytesPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendByteSlice(ctx, b, ptrToBytes(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBytesPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldNumber: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case 
encoder.OpStructFieldOmitEmptyNumber: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldNumberString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyNumberString: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldNumberPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyNumberPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldNumberPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyNumberPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldMarshalJSON: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalJSON: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + code = code.NextField + break + } + iface := ptrToInterface(code, p) + if (code.Flags&encoder.NilCheckFlags) != 0 && encoder.IsNilForMarshaler(iface) { + code = code.NextField + break + } + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalJSON(ctx, code, b, iface) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpStructFieldMarshalJSONPtr: + p := load(ctxptr, code.Idx) + b = 
appendStructKey(ctx, code, b) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalJSONPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldMarshalText: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalText: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + code = code.NextField + break + } + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpStructFieldMarshalTextPtr: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalTextPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldArray: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyArray: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldArrayPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyArrayPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + code = code.NextField + } + case encoder.OpStructFieldSlice: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptySlice: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + slice := ptrToSlice(p) + if slice.Len == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code 
= code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructFieldSlicePtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptySlicePtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + code = code.NextField + } + case encoder.OpStructFieldMap: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + uintptr(code.Offset)) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyMap: + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + uintptr(code.Offset)) + if p == 0 || maplen(ptrToUnsafePtr(p)) == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructFieldMapPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + uintptr(code.Offset)) + if p != 0 { + p = ptrToNPtr(p, code.PtrNum) + } + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyMapPtr: + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + uintptr(code.Offset)) + if p != 0 { + p = ptrToNPtr(p, code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + code = code.NextField + } + case encoder.OpStructFieldStruct: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyStruct: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + if ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructEnd: + b = appendStructEndSkipLast(ctx, code, b) + code = code.Next + case encoder.OpStructEndInt: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyInt: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndIntString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyIntString: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndIntPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p 
== 0 { + b = appendNull(ctx, b) + } else { + b = appendInt(ctx, b, p, code) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyIntPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p, code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndIntPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUint: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUint: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUintString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUintString: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUintPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendUint(ctx, b, p, code) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUintPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p, code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUintPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUintPtrString: + p := load(ctxptr, code.Idx) + p = 
ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32: + p := load(ctxptr, code.Idx) + v := ptrToFloat32(p + uintptr(code.Offset)) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32String: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32String: + p := load(ctxptr, code.Idx) + v := ptrToFloat32(p + uintptr(code.Offset)) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32Ptr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendFloat32(ctx, b, ptrToFloat32(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32PtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat64: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if v != 0 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = 
code.Next + case encoder.OpStructEndFloat64String: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64String: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if v != 0 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat64Ptr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + b = appendStructEnd(ctx, code, b) + code = code.Next + break + } + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat64PtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyString: + p := load(ctxptr, code.Idx) + v := ptrToString(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndStringString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + s := ptrToString(p + uintptr(code.Offset)) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, s))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyStringString: + p := load(ctxptr, code.Idx) + v := ptrToString(p + 
uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, v))) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndStringPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, ptrToString(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyStringPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndStringPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyStringPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBool: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBool: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBoolString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBoolString: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBoolPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendBool(ctx, b, ptrToBool(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBoolPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBoolPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 
0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBoolPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBytes: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBytes: + p := load(ctxptr, code.Idx) + v := ptrToBytes(p + uintptr(code.Offset)) + if len(v) > 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBytesPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendByteSlice(ctx, b, ptrToBytes(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBytesPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndNumber: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = appendStructEnd(ctx, code, bb) + code = code.Next + case encoder.OpStructEndOmitEmptyNumber: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = appendStructEnd(ctx, code, bb) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndNumberString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyNumberString: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndNumberPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyNumberPtr: + p := load(ctxptr, code.Idx) + p = 
ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = appendStructEnd(ctx, code, bb) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndNumberPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyNumberPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpEnd: + goto END + } + } +END: + return b, nil +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/debug_vm.go b/vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/debug_vm.go new file mode 100644 index 0000000000000..dd4cd489e06d1 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/debug_vm.go @@ -0,0 +1,35 @@ +package vm_color_indent + +import ( + "fmt" + + "github.com/goccy/go-json/internal/encoder" +) + +func DebugRun(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet) ([]byte, error) { + var code *encoder.Opcode + if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 { + code = codeSet.EscapeKeyCode + } else { + code = codeSet.NoescapeKeyCode + } + + defer func() { + if err := recover(); err != nil { + w := ctx.Option.DebugOut + fmt.Fprintln(w, "=============[DEBUG]===============") + fmt.Fprintln(w, "* [TYPE]") + fmt.Fprintln(w, codeSet.Type) + fmt.Fprintf(w, "\n") + fmt.Fprintln(w, "* [ALL OPCODE]") + fmt.Fprintln(w, code.Dump()) + fmt.Fprintf(w, "\n") + fmt.Fprintln(w, "* [CONTEXT]") + fmt.Fprintf(w, "%+v\n", ctx) + fmt.Fprintln(w, "===================================") + panic(err) + } + }() + + return Run(ctx, b, codeSet) +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/util.go b/vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/util.go new file mode 100644 index 0000000000000..2395abec975b8 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/util.go @@ -0,0 +1,297 @@ +package vm_color_indent + +import ( + "encoding/json" + "fmt" + "unsafe" + + "github.com/goccy/go-json/internal/encoder" + "github.com/goccy/go-json/internal/runtime" +) + +const uintptrSize = 4 << (^uintptr(0) >> 63) + +var ( + appendIndent = encoder.AppendIndent + appendStructEnd = encoder.AppendStructEndIndent + errUnsupportedValue = encoder.ErrUnsupportedValue + errUnsupportedFloat = encoder.ErrUnsupportedFloat + mapiterinit = encoder.MapIterInit + mapiterkey = encoder.MapIterKey + mapitervalue = encoder.MapIterValue + mapiternext = encoder.MapIterNext + maplen = encoder.MapLen +) + +type emptyInterface struct { + typ *runtime.Type + ptr unsafe.Pointer +} + +type nonEmptyInterface struct { + itab *struct { + ityp *runtime.Type // static interface type + typ *runtime.Type // dynamic concrete type + // unused 
fields... + } + ptr unsafe.Pointer +} + +func errUnimplementedOp(op encoder.OpType) error { + return fmt.Errorf("encoder (indent): opcode %s has not been implemented", op) +} + +func load(base uintptr, idx uint32) uintptr { + addr := base + uintptr(idx) + return **(**uintptr)(unsafe.Pointer(&addr)) +} + +func store(base uintptr, idx uint32, p uintptr) { + addr := base + uintptr(idx) + **(**uintptr)(unsafe.Pointer(&addr)) = p +} + +func loadNPtr(base uintptr, idx uint32, ptrNum uint8) uintptr { + addr := base + uintptr(idx) + p := **(**uintptr)(unsafe.Pointer(&addr)) + for i := uint8(0); i < ptrNum; i++ { + if p == 0 { + return 0 + } + p = ptrToPtr(p) + } + return p +} + +func ptrToUint64(p uintptr, bitSize uint8) uint64 { + switch bitSize { + case 8: + return (uint64)(**(**uint8)(unsafe.Pointer(&p))) + case 16: + return (uint64)(**(**uint16)(unsafe.Pointer(&p))) + case 32: + return (uint64)(**(**uint32)(unsafe.Pointer(&p))) + case 64: + return **(**uint64)(unsafe.Pointer(&p)) + } + return 0 +} + +func ptrToFloat32(p uintptr) float32 { return **(**float32)(unsafe.Pointer(&p)) } +func ptrToFloat64(p uintptr) float64 { return **(**float64)(unsafe.Pointer(&p)) } +func ptrToBool(p uintptr) bool { return **(**bool)(unsafe.Pointer(&p)) } +func ptrToBytes(p uintptr) []byte { return **(**[]byte)(unsafe.Pointer(&p)) } +func ptrToNumber(p uintptr) json.Number { return **(**json.Number)(unsafe.Pointer(&p)) } +func ptrToString(p uintptr) string { return **(**string)(unsafe.Pointer(&p)) } +func ptrToSlice(p uintptr) *runtime.SliceHeader { return *(**runtime.SliceHeader)(unsafe.Pointer(&p)) } +func ptrToPtr(p uintptr) uintptr { + return uintptr(**(**unsafe.Pointer)(unsafe.Pointer(&p))) +} +func ptrToNPtr(p uintptr, ptrNum uint8) uintptr { + for i := uint8(0); i < ptrNum; i++ { + if p == 0 { + return 0 + } + p = ptrToPtr(p) + } + return p +} + +func ptrToUnsafePtr(p uintptr) unsafe.Pointer { + return *(*unsafe.Pointer)(unsafe.Pointer(&p)) +} +func ptrToInterface(code *encoder.Opcode, p uintptr) interface{} { + return *(*interface{})(unsafe.Pointer(&emptyInterface{ + typ: code.Type, + ptr: *(*unsafe.Pointer)(unsafe.Pointer(&p)), + })) +} + +func appendInt(ctx *encoder.RuntimeContext, b []byte, p uintptr, code *encoder.Opcode) []byte { + format := ctx.Option.ColorScheme.Int + b = append(b, format.Header...) + b = encoder.AppendInt(ctx, b, p, code) + return append(b, format.Footer...) +} + +func appendUint(ctx *encoder.RuntimeContext, b []byte, p uintptr, code *encoder.Opcode) []byte { + format := ctx.Option.ColorScheme.Uint + b = append(b, format.Header...) + b = encoder.AppendUint(ctx, b, p, code) + return append(b, format.Footer...) +} + +func appendFloat32(ctx *encoder.RuntimeContext, b []byte, v float32) []byte { + format := ctx.Option.ColorScheme.Float + b = append(b, format.Header...) + b = encoder.AppendFloat32(ctx, b, v) + return append(b, format.Footer...) +} + +func appendFloat64(ctx *encoder.RuntimeContext, b []byte, v float64) []byte { + format := ctx.Option.ColorScheme.Float + b = append(b, format.Header...) + b = encoder.AppendFloat64(ctx, b, v) + return append(b, format.Footer...) +} + +func appendString(ctx *encoder.RuntimeContext, b []byte, v string) []byte { + format := ctx.Option.ColorScheme.String + b = append(b, format.Header...) + b = encoder.AppendString(ctx, b, v) + return append(b, format.Footer...) +} + +func appendByteSlice(ctx *encoder.RuntimeContext, b []byte, src []byte) []byte { + format := ctx.Option.ColorScheme.Binary + b = append(b, format.Header...) 
+ b = encoder.AppendByteSlice(ctx, b, src) + return append(b, format.Footer...) +} + +func appendNumber(ctx *encoder.RuntimeContext, b []byte, n json.Number) ([]byte, error) { + format := ctx.Option.ColorScheme.Int + b = append(b, format.Header...) + bb, err := encoder.AppendNumber(ctx, b, n) + if err != nil { + return nil, err + } + return append(bb, format.Footer...), nil +} + +func appendBool(ctx *encoder.RuntimeContext, b []byte, v bool) []byte { + format := ctx.Option.ColorScheme.Bool + b = append(b, format.Header...) + if v { + b = append(b, "true"...) + } else { + b = append(b, "false"...) + } + return append(b, format.Footer...) +} + +func appendNull(ctx *encoder.RuntimeContext, b []byte) []byte { + format := ctx.Option.ColorScheme.Null + b = append(b, format.Header...) + b = append(b, "null"...) + return append(b, format.Footer...) +} + +func appendComma(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, ',', '\n') +} + +func appendNullComma(ctx *encoder.RuntimeContext, b []byte) []byte { + format := ctx.Option.ColorScheme.Null + b = append(b, format.Header...) + b = append(b, "null"...) + return append(append(b, format.Footer...), ',', '\n') +} + +func appendColon(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b[:len(b)-2], ':', ' ') +} + +func appendMapKeyValue(ctx *encoder.RuntimeContext, code *encoder.Opcode, b, key, value []byte) []byte { + b = appendIndent(ctx, b, code.Indent+1) + b = append(b, key...) + b[len(b)-2] = ':' + b[len(b)-1] = ' ' + return append(b, value...) +} + +func appendMapEnd(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + b = b[:len(b)-2] + b = append(b, '\n') + b = appendIndent(ctx, b, code.Indent) + return append(b, '}', ',', '\n') +} + +func appendArrayHead(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + b = append(b, '[', '\n') + return appendIndent(ctx, b, code.Indent+1) +} + +func appendArrayEnd(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + b = b[:len(b)-2] + b = append(b, '\n') + b = appendIndent(ctx, b, code.Indent) + return append(b, ']', ',', '\n') +} + +func appendEmptyArray(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, '[', ']', ',', '\n') +} + +func appendEmptyObject(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, '{', '}', ',', '\n') +} + +func appendObjectEnd(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + last := len(b) - 1 + // replace comma to newline + b[last-1] = '\n' + b = appendIndent(ctx, b[:last], code.Indent) + return append(b, '}', ',', '\n') +} + +func appendMarshalJSON(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte, v interface{}) ([]byte, error) { + return encoder.AppendMarshalJSONIndent(ctx, code, b, v) +} + +func appendMarshalText(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte, v interface{}) ([]byte, error) { + format := ctx.Option.ColorScheme.String + b = append(b, format.Header...) + bb, err := encoder.AppendMarshalTextIndent(ctx, code, b, v) + if err != nil { + return nil, err + } + return append(bb, format.Footer...), nil +} + +func appendStructHead(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, '{', '\n') +} + +func appendStructKey(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + b = appendIndent(ctx, b, code.Indent) + + format := ctx.Option.ColorScheme.ObjectKey + b = append(b, format.Header...) + b = append(b, code.Key[:len(code.Key)-1]...) + b = append(b, format.Footer...) 
+ + return append(b, ':', ' ') +} + +func appendStructEndSkipLast(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + last := len(b) - 1 + if b[last-1] == '{' { + b[last] = '}' + } else { + if b[last] == '\n' { + // to remove ',' and '\n' characters + b = b[:len(b)-2] + } + b = append(b, '\n') + b = appendIndent(ctx, b, code.Indent-1) + b = append(b, '}') + } + return appendComma(ctx, b) +} + +func restoreIndent(ctx *encoder.RuntimeContext, code *encoder.Opcode, ctxptr uintptr) { + ctx.BaseIndent = uint32(load(ctxptr, code.Length)) +} + +func storeIndent(ctxptr uintptr, code *encoder.Opcode, indent uintptr) { + store(ctxptr, code.Length, indent) +} + +func appendArrayElemIndent(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + return appendIndent(ctx, b, code.Indent+1) +} + +func appendMapKeyIndent(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + return appendIndent(ctx, b, code.Indent) +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/vm.go b/vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/vm.go new file mode 100644 index 0000000000000..3b4e22e5d4212 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/vm.go @@ -0,0 +1,4859 @@ +// Code generated by internal/cmd/generator. DO NOT EDIT! +package vm_color_indent + +import ( + "math" + "reflect" + "sort" + "unsafe" + + "github.com/goccy/go-json/internal/encoder" + "github.com/goccy/go-json/internal/runtime" +) + +func Run(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet) ([]byte, error) { + recursiveLevel := 0 + ptrOffset := uintptr(0) + ctxptr := ctx.Ptr() + var code *encoder.Opcode + if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 { + code = codeSet.EscapeKeyCode + } else { + code = codeSet.NoescapeKeyCode + } + + for { + switch code.Op { + default: + return nil, errUnimplementedOp(code.Op) + case encoder.OpPtr: + p := load(ctxptr, code.Idx) + code = code.Next + store(ctxptr, code.Idx, ptrToPtr(p)) + case encoder.OpIntPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpInt: + b = appendInt(ctx, b, load(ctxptr, code.Idx), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpUintPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpUint: + b = appendUint(ctx, b, load(ctxptr, code.Idx), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpIntString: + b = append(b, '"') + b = appendInt(ctx, b, load(ctxptr, code.Idx), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpUintString: + b = append(b, '"') + b = appendUint(ctx, b, load(ctxptr, code.Idx), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpFloat32Ptr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + b = appendComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpFloat32: + b = appendFloat32(ctx, b, ptrToFloat32(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpFloat64Ptr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + 
fallthrough + case encoder.OpFloat64: + v := ptrToFloat64(load(ctxptr, code.Idx)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStringPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpString: + b = appendString(ctx, b, ptrToString(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpBoolPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpBool: + b = appendBool(ctx, b, ptrToBool(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpBytesPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpBytes: + b = appendByteSlice(ctx, b, ptrToBytes(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpNumberPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpNumber: + bb, err := appendNumber(ctx, b, ptrToNumber(load(ctxptr, code.Idx))) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpInterfacePtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpInterface: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + if recursiveLevel > encoder.StartDetectingCyclesAfter { + for _, seen := range ctx.SeenPtr { + if p == seen { + return nil, errUnsupportedValue(code, p) + } + } + } + ctx.SeenPtr = append(ctx.SeenPtr, p) + var ( + typ *runtime.Type + ifacePtr unsafe.Pointer + ) + up := ptrToUnsafePtr(p) + if code.Flags&encoder.NonEmptyInterfaceFlags != 0 { + iface := (*nonEmptyInterface)(up) + ifacePtr = iface.ptr + if iface.itab != nil { + typ = iface.itab.typ + } + } else { + iface := (*emptyInterface)(up) + ifacePtr = iface.ptr + typ = iface.typ + } + if ifacePtr == nil { + isDirectedNil := typ != nil && typ.Kind() == reflect.Struct && !runtime.IfaceIndir(typ) + if !isDirectedNil { + b = appendNullComma(ctx, b) + code = code.Next + break + } + } + ctx.KeepRefs = append(ctx.KeepRefs, up) + ifaceCodeSet, err := encoder.CompileToGetCodeSet(ctx, uintptr(unsafe.Pointer(typ))) + if err != nil { + return nil, err + } + + totalLength := uintptr(code.Length) + 3 + nextTotalLength := uintptr(ifaceCodeSet.CodeLength) + 3 + + var c *encoder.Opcode + if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 { + c = ifaceCodeSet.InterfaceEscapeKeyCode + } else { + c = ifaceCodeSet.InterfaceNoescapeKeyCode + } + curlen := uintptr(len(ctx.Ptrs)) + offsetNum := ptrOffset / uintptrSize + oldOffset := ptrOffset + ptrOffset += totalLength * uintptrSize + oldBaseIndent := ctx.BaseIndent + ctx.BaseIndent += code.Indent + + newLen := offsetNum + totalLength + nextTotalLength + if curlen < newLen { + ctx.Ptrs = append(ctx.Ptrs, make([]uintptr, newLen-curlen)...) 
+ } + ctxptr = ctx.Ptr() + ptrOffset // assign new ctxptr + + end := ifaceCodeSet.EndCode + store(ctxptr, c.Idx, uintptr(ifacePtr)) + store(ctxptr, end.Idx, oldOffset) + store(ctxptr, end.ElemIdx, uintptr(unsafe.Pointer(code.Next))) + storeIndent(ctxptr, end, uintptr(oldBaseIndent)) + code = c + recursiveLevel++ + case encoder.OpInterfaceEnd: + recursiveLevel-- + + // restore ctxptr + offset := load(ctxptr, code.Idx) + restoreIndent(ctx, code, ctxptr) + ctx.SeenPtr = ctx.SeenPtr[:len(ctx.SeenPtr)-1] + + codePtr := load(ctxptr, code.ElemIdx) + code = (*encoder.Opcode)(ptrToUnsafePtr(codePtr)) + ctxptr = ctx.Ptr() + offset + ptrOffset = offset + case encoder.OpMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, ptrToPtr(p)) + fallthrough + case encoder.OpMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + if (code.Flags&encoder.IsNilableTypeFlags) != 0 && (code.Flags&encoder.IndirectFlags) != 0 { + p = ptrToPtr(p) + } + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, ptrToPtr(p)) + fallthrough + case encoder.OpMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 { + b = append(b, `""`...) + b = appendComma(ctx, b) + code = code.Next + break + } + if (code.Flags&encoder.IsNilableTypeFlags) != 0 && (code.Flags&encoder.IndirectFlags) != 0 { + p = ptrToPtr(p) + } + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpSlicePtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpSlice: + p := load(ctxptr, code.Idx) + slice := ptrToSlice(p) + if p == 0 || slice.Data == nil { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.ElemIdx, 0) + store(ctxptr, code.Length, uintptr(slice.Len)) + store(ctxptr, code.Idx, uintptr(slice.Data)) + if slice.Len > 0 { + b = appendArrayHead(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, uintptr(slice.Data)) + } else { + b = appendEmptyArray(ctx, b) + code = code.End.Next + } + case encoder.OpSliceElem: + idx := load(ctxptr, code.ElemIdx) + length := load(ctxptr, code.Length) + idx++ + if idx < length { + b = appendArrayElemIndent(ctx, code, b) + store(ctxptr, code.ElemIdx, idx) + data := load(ctxptr, code.Idx) + size := uintptr(code.Size) + code = code.Next + store(ctxptr, code.Idx, data+idx*size) + } else { + b = appendArrayEnd(ctx, code, b) + code = code.End.Next + } + case encoder.OpArrayPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpArray: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + if code.Length > 0 { + b = appendArrayHead(ctx, code, b) + store(ctxptr, code.ElemIdx, 0) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + b = appendEmptyArray(ctx, b) + code = code.End.Next + } + case encoder.OpArrayElem: + idx := load(ctxptr, 
code.ElemIdx) + idx++ + if idx < uintptr(code.Length) { + b = appendArrayElemIndent(ctx, code, b) + store(ctxptr, code.ElemIdx, idx) + p := load(ctxptr, code.Idx) + size := uintptr(code.Size) + code = code.Next + store(ctxptr, code.Idx, p+idx*size) + } else { + b = appendArrayEnd(ctx, code, b) + code = code.End.Next + } + case encoder.OpMapPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpMap: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + uptr := ptrToUnsafePtr(p) + mlen := maplen(uptr) + if mlen <= 0 { + b = appendEmptyObject(ctx, b) + code = code.End.Next + break + } + b = appendStructHead(ctx, b) + unorderedMap := (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 + mapCtx := encoder.NewMapContext(mlen, unorderedMap) + mapiterinit(code.Type, uptr, &mapCtx.Iter) + store(ctxptr, code.Idx, uintptr(unsafe.Pointer(mapCtx))) + ctx.KeepRefs = append(ctx.KeepRefs, unsafe.Pointer(mapCtx)) + if unorderedMap { + b = appendMapKeyIndent(ctx, code.Next, b) + } else { + mapCtx.Start = len(b) + mapCtx.First = len(b) + } + key := mapiterkey(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(key)) + code = code.Next + case encoder.OpMapKey: + mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx))) + idx := mapCtx.Idx + idx++ + if (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 { + if idx < mapCtx.Len { + b = appendMapKeyIndent(ctx, code, b) + mapCtx.Idx = int(idx) + key := mapiterkey(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(key)) + code = code.Next + } else { + b = appendObjectEnd(ctx, code, b) + encoder.ReleaseMapContext(mapCtx) + code = code.End.Next + } + } else { + mapCtx.Slice.Items[mapCtx.Idx].Value = b[mapCtx.Start:len(b)] + if idx < mapCtx.Len { + mapCtx.Idx = int(idx) + mapCtx.Start = len(b) + key := mapiterkey(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(key)) + code = code.Next + } else { + code = code.End + } + } + case encoder.OpMapValue: + mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx))) + if (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 { + b = appendColon(ctx, b) + } else { + mapCtx.Slice.Items[mapCtx.Idx].Key = b[mapCtx.Start:len(b)] + mapCtx.Start = len(b) + } + value := mapitervalue(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(value)) + mapiternext(&mapCtx.Iter) + code = code.Next + case encoder.OpMapEnd: + // this operation only used by sorted map. + mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx))) + sort.Sort(mapCtx.Slice) + buf := mapCtx.Buf + for _, item := range mapCtx.Slice.Items { + buf = appendMapKeyValue(ctx, code, buf, item.Key, item.Value) + } + buf = appendMapEnd(ctx, code, buf) + b = b[:mapCtx.First] + b = append(b, buf...) 
+ mapCtx.Buf = buf + encoder.ReleaseMapContext(mapCtx) + code = code.Next + case encoder.OpRecursivePtr: + p := load(ctxptr, code.Idx) + if p == 0 { + code = code.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpRecursive: + ptr := load(ctxptr, code.Idx) + if ptr != 0 { + if recursiveLevel > encoder.StartDetectingCyclesAfter { + for _, seen := range ctx.SeenPtr { + if ptr == seen { + return nil, errUnsupportedValue(code, ptr) + } + } + } + } + ctx.SeenPtr = append(ctx.SeenPtr, ptr) + c := code.Jmp.Code + curlen := uintptr(len(ctx.Ptrs)) + offsetNum := ptrOffset / uintptrSize + oldOffset := ptrOffset + ptrOffset += code.Jmp.CurLen * uintptrSize + oldBaseIndent := ctx.BaseIndent + indentDiffFromTop := c.Indent - 1 + ctx.BaseIndent += code.Indent - indentDiffFromTop + + newLen := offsetNum + code.Jmp.CurLen + code.Jmp.NextLen + if curlen < newLen { + ctx.Ptrs = append(ctx.Ptrs, make([]uintptr, newLen-curlen)...) + } + ctxptr = ctx.Ptr() + ptrOffset // assign new ctxptr + + store(ctxptr, c.Idx, ptr) + store(ctxptr, c.End.Next.Idx, oldOffset) + store(ctxptr, c.End.Next.ElemIdx, uintptr(unsafe.Pointer(code.Next))) + storeIndent(ctxptr, c.End.Next, uintptr(oldBaseIndent)) + code = c + recursiveLevel++ + case encoder.OpRecursiveEnd: + recursiveLevel-- + + // restore ctxptr + restoreIndent(ctx, code, ctxptr) + offset := load(ctxptr, code.Idx) + ctx.SeenPtr = ctx.SeenPtr[:len(ctx.SeenPtr)-1] + + codePtr := load(ctxptr, code.ElemIdx) + code = (*encoder.Opcode)(ptrToUnsafePtr(codePtr)) + ctxptr = ctx.Ptr() + offset + ptrOffset = offset + case encoder.OpStructPtrHead: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHead: + p := load(ctxptr, code.Idx) + if p == 0 && ((code.Flags&encoder.IndirectFlags) != 0 || code.Next.Op == encoder.OpStructEnd) { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if len(code.Key) > 0 { + if (code.Flags&encoder.IsTaggedKeyFlags) != 0 || code.Flags&encoder.AnonymousKeyFlags == 0 { + b = appendStructKey(ctx, code, b) + } + } + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructPtrHeadOmitEmpty: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmpty: + p := load(ctxptr, code.Idx) + if p == 0 && ((code.Flags&encoder.IndirectFlags) != 0 || code.Next.Op == encoder.OpStructEnd) { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + if p == 0 || (ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0) { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadInt: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } 
+ code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadInt: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyInt: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyInt: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadIntString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadIntString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyIntString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyIntString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + u64 := ptrToUint64(p, code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { 
+ if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendInt(ctx, b, p, code) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p, code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadUint: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadUint: + p := load(ctxptr, code.Idx) + if p == 0 { + if 
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyUint: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyUint: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadUintString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadUintString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyUintString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyUintString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadUintPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadUintPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags 
== 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendUint(ctx, b, p, code) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyUintPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyUintPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p, code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadUintPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadUintPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyUintPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyUintPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadFloat32: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadFloat32: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if 
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat32: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyFloat32: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat32(p + uintptr(code.Offset)) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadFloat32String: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadFloat32String: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat32String: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyFloat32String: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat32(p + uintptr(code.Offset)) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadFloat32Ptr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadFloat32Ptr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + 
b = appendFloat32(ctx, b, ptrToFloat32(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat32Ptr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyFloat32Ptr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadFloat32PtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadFloat32PtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat32PtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyFloat32PtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadFloat64: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadFloat64: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, 
b) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat64: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyFloat64: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat64(p + uintptr(code.Offset)) + if v == 0 { + code = code.NextField + } else { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadFloat64String: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadFloat64String: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat64String: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyFloat64String: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat64(p + uintptr(code.Offset)) + if v == 0 { + code = code.NextField + } else { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadFloat64Ptr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadFloat64Ptr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if 
(code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat64Ptr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyFloat64Ptr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadFloat64PtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadFloat64PtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat64PtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyFloat64PtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, 
b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNull(ctx, b) + b = appendComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToString(p + uintptr(code.Offset)) + if v == "" { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadStringString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadStringString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p+uintptr(code.Offset))))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyStringString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyStringString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToString(p + uintptr(code.Offset)) + if v == "" { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, v))) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadStringPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadStringPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + 
code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, ptrToString(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyStringPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyStringPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadStringPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadStringPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyStringPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyStringPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadBool: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadBool: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags 
== 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBool: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyBool: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } else { + code = code.NextField + } + case encoder.OpStructPtrHeadBoolString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadBoolString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBoolString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyBoolString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } else { + code = code.NextField + } + case encoder.OpStructPtrHeadBoolPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadBoolPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = 
appendNull(ctx, b) + } else { + b = appendBool(ctx, b, ptrToBool(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBoolPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyBoolPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadBoolPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadBoolPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBoolPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyBoolPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadBytes: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadBytes: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code 
= code.Next + case encoder.OpStructPtrHeadOmitEmptyBytes: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyBytes: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToBytes(p + uintptr(code.Offset)) + if len(v) == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadBytesPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadBytesPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendByteSlice(ctx, b, ptrToBytes(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBytesPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyBytesPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadNumber: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadNumber: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyNumber: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if 
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyNumber: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToNumber(p + uintptr(code.Offset)) + if v == "" { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + } + case encoder.OpStructPtrHeadNumberString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadNumberString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyNumberString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyNumberString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToNumber(p + uintptr(code.Offset)) + if v == "" { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadNumberPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadNumberPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyNumberPtr: + p := load(ctxptr, code.Idx) + if p == 0 { 
+ if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyNumberPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructPtrHeadNumberPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadNumberPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyNumberPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyNumberPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadArray, encoder.OpStructPtrHeadSlice: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadArray, encoder.OpStructHeadSlice: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case 
encoder.OpStructPtrHeadOmitEmptyArray: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyArray: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructPtrHeadOmitEmptySlice: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptySlice: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + slice := ptrToSlice(p) + if slice.Len == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadArrayPtr, encoder.OpStructPtrHeadSlicePtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadArrayPtr, encoder.OpStructHeadSlicePtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNullComma(ctx, b) + code = code.NextField + } else { + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadOmitEmptyArrayPtr, encoder.OpStructPtrHeadOmitEmptySlicePtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyArrayPtr, encoder.OpStructHeadOmitEmptySlicePtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadMap: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = 
appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadMap: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if p != 0 && (code.Flags&encoder.IndirectFlags) != 0 { + p = ptrToPtr(p + uintptr(code.Offset)) + } + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructPtrHeadOmitEmptyMap: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyMap: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if p != 0 && (code.Flags&encoder.IndirectFlags) != 0 { + p = ptrToPtr(p + uintptr(code.Offset)) + } + if maplen(ptrToUnsafePtr(p)) == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadMapPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadMapPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.NextField + break + } + p = ptrToPtr(p + uintptr(code.Offset)) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.NextField + } else { + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p, code.PtrNum) + } + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadOmitEmptyMapPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyMapPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if p == 0 { + code = code.NextField + break + } + p = ptrToPtr(p + uintptr(code.Offset)) + if p == 0 { + code = code.NextField + } else { + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p, code.PtrNum) + } + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = 
appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadMarshalJSON { + p = ptrToPtr(p) + } + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadOmitEmptyMarshalJSON { + p = ptrToPtr(p) + } + } + iface := ptrToInterface(code, p) + if (code.Flags&encoder.NilCheckFlags) != 0 && encoder.IsNilForMarshaler(iface) { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalJSON(ctx, code, b, iface) + if err != nil { + return nil, err + } + b = bb + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyMarshalJSONPtr: + p 
:= load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if p == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadMarshalText { + p = ptrToPtr(p) + } + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadOmitEmptyMarshalText { + p = ptrToPtr(p) + } + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if 
code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			b = appendStructKey(ctx, code, b)
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p))
+				if err != nil {
+					return nil, err
+				}
+				b = bb
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructPtrHeadOmitEmptyMarshalTextPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum))
+			fallthrough
+		case encoder.OpStructHeadOmitEmptyMarshalTextPtr:
+			p := load(ctxptr, code.Idx)
+			if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 {
+				if code.Flags&encoder.AnonymousHeadFlags == 0 {
+					b = appendNullComma(ctx, b)
+				}
+				code = code.End.Next
+				break
+			}
+			if (code.Flags & encoder.IndirectFlags) != 0 {
+				p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			}
+			if code.Flags&encoder.AnonymousHeadFlags == 0 {
+				b = appendStructHead(ctx, b)
+			}
+			if p == 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p))
+				if err != nil {
+					return nil, err
+				}
+				b = bb
+				b = appendComma(ctx, b)
+				code = code.Next
+			}
+		case encoder.OpStructField:
+			if code.Flags&encoder.IsTaggedKeyFlags != 0 || code.Flags&encoder.AnonymousKeyFlags == 0 {
+				b = appendStructKey(ctx, code, b)
+			}
+			p := load(ctxptr, code.Idx) + uintptr(code.Offset)
+			code = code.Next
+			store(ctxptr, code.Idx, p)
+		case encoder.OpStructFieldOmitEmpty:
+			p := load(ctxptr, code.Idx)
+			p += uintptr(code.Offset)
+			if ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0 {
+				code = code.NextField
+			} else {
+				b = appendStructKey(ctx, code, b)
+				code = code.Next
+				store(ctxptr, code.Idx, p)
+			}
+		case encoder.OpStructFieldInt:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = appendInt(ctx, b, p+uintptr(code.Offset), code)
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyInt:
+			p := load(ctxptr, code.Idx)
+			u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize)
+			v := u64 & ((1 << code.NumBitSize) - 1)
+			if v != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = appendInt(ctx, b, p+uintptr(code.Offset), code)
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldIntString:
+			p := load(ctxptr, code.Idx)
+			b = appendStructKey(ctx, code, b)
+			b = append(b, '"')
+			b = appendInt(ctx, b, p+uintptr(code.Offset), code)
+			b = append(b, '"')
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyIntString:
+			p := load(ctxptr, code.Idx)
+			u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize)
+			v := u64 & ((1 << code.NumBitSize) - 1)
+			if v != 0 {
+				b = appendStructKey(ctx, code, b)
+				b = append(b, '"')
+				b = appendInt(ctx, b, p+uintptr(code.Offset), code)
+				b = append(b, '"')
+				b = appendComma(ctx, b)
+			}
+			code = code.Next
+		case encoder.OpStructFieldIntPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			b = appendStructKey(ctx, code, b)
+			if p == 0 {
+				b = appendNull(ctx, b)
+			} else {
+				b = appendInt(ctx, b, p, code)
+			}
+			b = appendComma(ctx, b)
+			code = code.Next
+		case encoder.OpStructFieldOmitEmptyIntPtr:
+			p := load(ctxptr, code.Idx)
+			p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum)
+			if
p != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p, code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldIntPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldUint: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyUint: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldUintString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyUintString: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldUintPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendUint(ctx, b, p, code) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyUintPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p, code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldUintPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyUintPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat32: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat32: + p := load(ctxptr, code.Idx) + v := ptrToFloat32(p + uintptr(code.Offset)) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, v) + b = 
appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat32String: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat32String: + p := load(ctxptr, code.Idx) + v := ptrToFloat32(p + uintptr(code.Offset)) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat32Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendFloat32(ctx, b, ptrToFloat32(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat32Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat32PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat32PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat64: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat64: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if v != 0 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat64String: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat64String: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if v != 0 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat64Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, 
errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat64Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat64PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat64PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyString: + p := load(ctxptr, code.Idx) + v := ptrToString(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldStringString: + p := load(ctxptr, code.Idx) + s := ptrToString(p + uintptr(code.Offset)) + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, s))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyStringString: + p := load(ctxptr, code.Idx) + v := ptrToString(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, v))) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldStringPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, ptrToString(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyStringPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldStringPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyStringPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + b = appendComma(ctx, b) 
+ } + code = code.Next + case encoder.OpStructFieldBool: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBool: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBoolString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBoolString: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBoolPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendBool(ctx, b, ptrToBool(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBoolPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBoolPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBoolPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBytes: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBytes: + p := load(ctxptr, code.Idx) + v := ptrToBytes(p + uintptr(code.Offset)) + if len(v) > 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBytesPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendByteSlice(ctx, b, ptrToBytes(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBytesPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldNumber: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case 
encoder.OpStructFieldOmitEmptyNumber: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldNumberString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyNumberString: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldNumberPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyNumberPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldNumberPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyNumberPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldMarshalJSON: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalJSON: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + code = code.NextField + break + } + iface := ptrToInterface(code, p) + if (code.Flags&encoder.NilCheckFlags) != 0 && encoder.IsNilForMarshaler(iface) { + code = code.NextField + break + } + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalJSON(ctx, code, b, iface) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpStructFieldMarshalJSONPtr: + p := load(ctxptr, code.Idx) + b = 
appendStructKey(ctx, code, b) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalJSONPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldMarshalText: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalText: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + code = code.NextField + break + } + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpStructFieldMarshalTextPtr: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalTextPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldArray: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyArray: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldArrayPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyArrayPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + code = code.NextField + } + case encoder.OpStructFieldSlice: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptySlice: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + slice := ptrToSlice(p) + if slice.Len == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code 
= code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructFieldSlicePtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptySlicePtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + code = code.NextField + } + case encoder.OpStructFieldMap: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + uintptr(code.Offset)) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyMap: + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + uintptr(code.Offset)) + if p == 0 || maplen(ptrToUnsafePtr(p)) == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructFieldMapPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + uintptr(code.Offset)) + if p != 0 { + p = ptrToNPtr(p, code.PtrNum) + } + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyMapPtr: + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + uintptr(code.Offset)) + if p != 0 { + p = ptrToNPtr(p, code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + code = code.NextField + } + case encoder.OpStructFieldStruct: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyStruct: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + if ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructEnd: + b = appendStructEndSkipLast(ctx, code, b) + code = code.Next + case encoder.OpStructEndInt: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyInt: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndIntString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyIntString: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndIntPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p 
== 0 { + b = appendNull(ctx, b) + } else { + b = appendInt(ctx, b, p, code) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyIntPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p, code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndIntPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUint: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUint: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUintString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUintString: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUintPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendUint(ctx, b, p, code) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUintPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p, code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUintPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUintPtrString: + p := load(ctxptr, code.Idx) + p = 
ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32: + p := load(ctxptr, code.Idx) + v := ptrToFloat32(p + uintptr(code.Offset)) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32String: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32String: + p := load(ctxptr, code.Idx) + v := ptrToFloat32(p + uintptr(code.Offset)) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32Ptr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendFloat32(ctx, b, ptrToFloat32(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32PtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat64: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if v != 0 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = 
code.Next + case encoder.OpStructEndFloat64String: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64String: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if v != 0 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat64Ptr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + b = appendStructEnd(ctx, code, b) + code = code.Next + break + } + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat64PtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyString: + p := load(ctxptr, code.Idx) + v := ptrToString(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndStringString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + s := ptrToString(p + uintptr(code.Offset)) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, s))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyStringString: + p := load(ctxptr, code.Idx) + v := ptrToString(p + 
uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, v))) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndStringPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, ptrToString(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyStringPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndStringPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyStringPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBool: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBool: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBoolString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBoolString: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBoolPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendBool(ctx, b, ptrToBool(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBoolPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBoolPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 
0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBoolPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBytes: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBytes: + p := load(ctxptr, code.Idx) + v := ptrToBytes(p + uintptr(code.Offset)) + if len(v) > 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBytesPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendByteSlice(ctx, b, ptrToBytes(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBytesPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndNumber: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = appendStructEnd(ctx, code, bb) + code = code.Next + case encoder.OpStructEndOmitEmptyNumber: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = appendStructEnd(ctx, code, bb) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndNumberString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyNumberString: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndNumberPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyNumberPtr: + p := load(ctxptr, code.Idx) + p = 
ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = appendStructEnd(ctx, code, bb) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndNumberPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyNumberPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpEnd: + goto END + } + } +END: + return b, nil +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/debug_vm.go b/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/debug_vm.go new file mode 100644 index 0000000000000..99395388c1e30 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/debug_vm.go @@ -0,0 +1,35 @@ +package vm_indent + +import ( + "fmt" + + "github.com/goccy/go-json/internal/encoder" +) + +func DebugRun(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet) ([]byte, error) { + var code *encoder.Opcode + if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 { + code = codeSet.EscapeKeyCode + } else { + code = codeSet.NoescapeKeyCode + } + + defer func() { + if err := recover(); err != nil { + w := ctx.Option.DebugOut + fmt.Fprintln(w, "=============[DEBUG]===============") + fmt.Fprintln(w, "* [TYPE]") + fmt.Fprintln(w, codeSet.Type) + fmt.Fprintf(w, "\n") + fmt.Fprintln(w, "* [ALL OPCODE]") + fmt.Fprintln(w, code.Dump()) + fmt.Fprintf(w, "\n") + fmt.Fprintln(w, "* [CONTEXT]") + fmt.Fprintf(w, "%+v\n", ctx) + fmt.Fprintln(w, "===================================") + panic(err) + } + }() + + return Run(ctx, b, codeSet) +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/hack.go b/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/hack.go new file mode 100644 index 0000000000000..9e245bfe57d3d --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/hack.go @@ -0,0 +1,9 @@ +package vm_indent + +import ( + // HACK: compile order + // `vm`, `vm_indent`, `vm_color`, `vm_color_indent` packages uses a lot of memory to compile, + // so forcibly make dependencies and avoid compiling in concurrent. 
+ // dependency order: vm => vm_indent => vm_color => vm_color_indent + _ "github.com/goccy/go-json/internal/encoder/vm_color" +) diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/util.go b/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/util.go new file mode 100644 index 0000000000000..6cb745e3939b3 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/util.go @@ -0,0 +1,230 @@ +package vm_indent + +import ( + "encoding/json" + "fmt" + "unsafe" + + "github.com/goccy/go-json/internal/encoder" + "github.com/goccy/go-json/internal/runtime" +) + +const uintptrSize = 4 << (^uintptr(0) >> 63) + +var ( + appendInt = encoder.AppendInt + appendUint = encoder.AppendUint + appendFloat32 = encoder.AppendFloat32 + appendFloat64 = encoder.AppendFloat64 + appendString = encoder.AppendString + appendByteSlice = encoder.AppendByteSlice + appendNumber = encoder.AppendNumber + appendStructEnd = encoder.AppendStructEndIndent + appendIndent = encoder.AppendIndent + errUnsupportedValue = encoder.ErrUnsupportedValue + errUnsupportedFloat = encoder.ErrUnsupportedFloat + mapiterinit = encoder.MapIterInit + mapiterkey = encoder.MapIterKey + mapitervalue = encoder.MapIterValue + mapiternext = encoder.MapIterNext + maplen = encoder.MapLen +) + +type emptyInterface struct { + typ *runtime.Type + ptr unsafe.Pointer +} + +type nonEmptyInterface struct { + itab *struct { + ityp *runtime.Type // static interface type + typ *runtime.Type // dynamic concrete type + // unused fields... + } + ptr unsafe.Pointer +} + +func errUnimplementedOp(op encoder.OpType) error { + return fmt.Errorf("encoder (indent): opcode %s has not been implemented", op) +} + +func load(base uintptr, idx uint32) uintptr { + addr := base + uintptr(idx) + return **(**uintptr)(unsafe.Pointer(&addr)) +} + +func store(base uintptr, idx uint32, p uintptr) { + addr := base + uintptr(idx) + **(**uintptr)(unsafe.Pointer(&addr)) = p +} + +func loadNPtr(base uintptr, idx uint32, ptrNum uint8) uintptr { + addr := base + uintptr(idx) + p := **(**uintptr)(unsafe.Pointer(&addr)) + for i := uint8(0); i < ptrNum; i++ { + if p == 0 { + return 0 + } + p = ptrToPtr(p) + } + return p +} + +func ptrToUint64(p uintptr, bitSize uint8) uint64 { + switch bitSize { + case 8: + return (uint64)(**(**uint8)(unsafe.Pointer(&p))) + case 16: + return (uint64)(**(**uint16)(unsafe.Pointer(&p))) + case 32: + return (uint64)(**(**uint32)(unsafe.Pointer(&p))) + case 64: + return **(**uint64)(unsafe.Pointer(&p)) + } + return 0 +} +func ptrToFloat32(p uintptr) float32 { return **(**float32)(unsafe.Pointer(&p)) } +func ptrToFloat64(p uintptr) float64 { return **(**float64)(unsafe.Pointer(&p)) } +func ptrToBool(p uintptr) bool { return **(**bool)(unsafe.Pointer(&p)) } +func ptrToBytes(p uintptr) []byte { return **(**[]byte)(unsafe.Pointer(&p)) } +func ptrToNumber(p uintptr) json.Number { return **(**json.Number)(unsafe.Pointer(&p)) } +func ptrToString(p uintptr) string { return **(**string)(unsafe.Pointer(&p)) } +func ptrToSlice(p uintptr) *runtime.SliceHeader { return *(**runtime.SliceHeader)(unsafe.Pointer(&p)) } +func ptrToPtr(p uintptr) uintptr { + return uintptr(**(**unsafe.Pointer)(unsafe.Pointer(&p))) +} +func ptrToNPtr(p uintptr, ptrNum uint8) uintptr { + for i := uint8(0); i < ptrNum; i++ { + if p == 0 { + return 0 + } + p = ptrToPtr(p) + } + return p +} + +func ptrToUnsafePtr(p uintptr) unsafe.Pointer { + return *(*unsafe.Pointer)(unsafe.Pointer(&p)) +} +func ptrToInterface(code *encoder.Opcode, p uintptr) interface{} { + 
return *(*interface{})(unsafe.Pointer(&emptyInterface{ + typ: code.Type, + ptr: *(*unsafe.Pointer)(unsafe.Pointer(&p)), + })) +} + +func appendBool(_ *encoder.RuntimeContext, b []byte, v bool) []byte { + if v { + return append(b, "true"...) + } + return append(b, "false"...) +} + +func appendNull(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, "null"...) +} + +func appendComma(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, ',', '\n') +} + +func appendNullComma(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, "null,\n"...) +} + +func appendColon(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b[:len(b)-2], ':', ' ') +} + +func appendMapKeyValue(ctx *encoder.RuntimeContext, code *encoder.Opcode, b, key, value []byte) []byte { + b = appendIndent(ctx, b, code.Indent+1) + b = append(b, key...) + b[len(b)-2] = ':' + b[len(b)-1] = ' ' + return append(b, value...) +} + +func appendMapEnd(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + b = b[:len(b)-2] + b = append(b, '\n') + b = appendIndent(ctx, b, code.Indent) + return append(b, '}', ',', '\n') +} + +func appendArrayHead(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + b = append(b, '[', '\n') + return appendIndent(ctx, b, code.Indent+1) +} + +func appendArrayEnd(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + b = b[:len(b)-2] + b = append(b, '\n') + b = appendIndent(ctx, b, code.Indent) + return append(b, ']', ',', '\n') +} + +func appendEmptyArray(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, '[', ']', ',', '\n') +} + +func appendEmptyObject(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, '{', '}', ',', '\n') +} + +func appendObjectEnd(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + last := len(b) - 1 + // replace comma to newline + b[last-1] = '\n' + b = appendIndent(ctx, b[:last], code.Indent) + return append(b, '}', ',', '\n') +} + +func appendMarshalJSON(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte, v interface{}) ([]byte, error) { + return encoder.AppendMarshalJSONIndent(ctx, code, b, v) +} + +func appendMarshalText(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte, v interface{}) ([]byte, error) { + return encoder.AppendMarshalTextIndent(ctx, code, b, v) +} + +func appendStructHead(_ *encoder.RuntimeContext, b []byte) []byte { + return append(b, '{', '\n') +} + +func appendStructKey(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + b = appendIndent(ctx, b, code.Indent) + b = append(b, code.Key...) 
+ return append(b, ' ') +} + +func appendStructEndSkipLast(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + last := len(b) - 1 + if b[last-1] == '{' { + b[last] = '}' + } else { + if b[last] == '\n' { + // to remove ',' and '\n' characters + b = b[:len(b)-2] + } + b = append(b, '\n') + b = appendIndent(ctx, b, code.Indent-1) + b = append(b, '}') + } + return appendComma(ctx, b) +} + +func restoreIndent(ctx *encoder.RuntimeContext, code *encoder.Opcode, ctxptr uintptr) { + ctx.BaseIndent = uint32(load(ctxptr, code.Length)) +} + +func storeIndent(ctxptr uintptr, code *encoder.Opcode, indent uintptr) { + store(ctxptr, code.Length, indent) +} + +func appendArrayElemIndent(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + return appendIndent(ctx, b, code.Indent+1) +} + +func appendMapKeyIndent(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { + return appendIndent(ctx, b, code.Indent) +} diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/vm.go b/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/vm.go new file mode 100644 index 0000000000000..836c5c8a85ac1 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/vm.go @@ -0,0 +1,4859 @@ +// Code generated by internal/cmd/generator. DO NOT EDIT! +package vm_indent + +import ( + "math" + "reflect" + "sort" + "unsafe" + + "github.com/goccy/go-json/internal/encoder" + "github.com/goccy/go-json/internal/runtime" +) + +func Run(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet) ([]byte, error) { + recursiveLevel := 0 + ptrOffset := uintptr(0) + ctxptr := ctx.Ptr() + var code *encoder.Opcode + if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 { + code = codeSet.EscapeKeyCode + } else { + code = codeSet.NoescapeKeyCode + } + + for { + switch code.Op { + default: + return nil, errUnimplementedOp(code.Op) + case encoder.OpPtr: + p := load(ctxptr, code.Idx) + code = code.Next + store(ctxptr, code.Idx, ptrToPtr(p)) + case encoder.OpIntPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpInt: + b = appendInt(ctx, b, load(ctxptr, code.Idx), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpUintPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpUint: + b = appendUint(ctx, b, load(ctxptr, code.Idx), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpIntString: + b = append(b, '"') + b = appendInt(ctx, b, load(ctxptr, code.Idx), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpUintString: + b = append(b, '"') + b = appendUint(ctx, b, load(ctxptr, code.Idx), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpFloat32Ptr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + b = appendComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpFloat32: + b = appendFloat32(ctx, b, ptrToFloat32(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpFloat64Ptr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpFloat64: + 
v := ptrToFloat64(load(ctxptr, code.Idx)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStringPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpString: + b = appendString(ctx, b, ptrToString(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpBoolPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpBool: + b = appendBool(ctx, b, ptrToBool(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpBytesPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpBytes: + b = appendByteSlice(ctx, b, ptrToBytes(load(ctxptr, code.Idx))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpNumberPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpNumber: + bb, err := appendNumber(ctx, b, ptrToNumber(load(ctxptr, code.Idx))) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpInterfacePtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpInterface: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + if recursiveLevel > encoder.StartDetectingCyclesAfter { + for _, seen := range ctx.SeenPtr { + if p == seen { + return nil, errUnsupportedValue(code, p) + } + } + } + ctx.SeenPtr = append(ctx.SeenPtr, p) + var ( + typ *runtime.Type + ifacePtr unsafe.Pointer + ) + up := ptrToUnsafePtr(p) + if code.Flags&encoder.NonEmptyInterfaceFlags != 0 { + iface := (*nonEmptyInterface)(up) + ifacePtr = iface.ptr + if iface.itab != nil { + typ = iface.itab.typ + } + } else { + iface := (*emptyInterface)(up) + ifacePtr = iface.ptr + typ = iface.typ + } + if ifacePtr == nil { + isDirectedNil := typ != nil && typ.Kind() == reflect.Struct && !runtime.IfaceIndir(typ) + if !isDirectedNil { + b = appendNullComma(ctx, b) + code = code.Next + break + } + } + ctx.KeepRefs = append(ctx.KeepRefs, up) + ifaceCodeSet, err := encoder.CompileToGetCodeSet(ctx, uintptr(unsafe.Pointer(typ))) + if err != nil { + return nil, err + } + + totalLength := uintptr(code.Length) + 3 + nextTotalLength := uintptr(ifaceCodeSet.CodeLength) + 3 + + var c *encoder.Opcode + if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 { + c = ifaceCodeSet.InterfaceEscapeKeyCode + } else { + c = ifaceCodeSet.InterfaceNoescapeKeyCode + } + curlen := uintptr(len(ctx.Ptrs)) + offsetNum := ptrOffset / uintptrSize + oldOffset := ptrOffset + ptrOffset += totalLength * uintptrSize + oldBaseIndent := ctx.BaseIndent + ctx.BaseIndent += code.Indent + + newLen := offsetNum + totalLength + nextTotalLength + if curlen < newLen { + ctx.Ptrs = append(ctx.Ptrs, make([]uintptr, newLen-curlen)...) 
+ } + ctxptr = ctx.Ptr() + ptrOffset // assign new ctxptr + + end := ifaceCodeSet.EndCode + store(ctxptr, c.Idx, uintptr(ifacePtr)) + store(ctxptr, end.Idx, oldOffset) + store(ctxptr, end.ElemIdx, uintptr(unsafe.Pointer(code.Next))) + storeIndent(ctxptr, end, uintptr(oldBaseIndent)) + code = c + recursiveLevel++ + case encoder.OpInterfaceEnd: + recursiveLevel-- + + // restore ctxptr + offset := load(ctxptr, code.Idx) + restoreIndent(ctx, code, ctxptr) + ctx.SeenPtr = ctx.SeenPtr[:len(ctx.SeenPtr)-1] + + codePtr := load(ctxptr, code.ElemIdx) + code = (*encoder.Opcode)(ptrToUnsafePtr(codePtr)) + ctxptr = ctx.Ptr() + offset + ptrOffset = offset + case encoder.OpMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, ptrToPtr(p)) + fallthrough + case encoder.OpMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + if (code.Flags&encoder.IsNilableTypeFlags) != 0 && (code.Flags&encoder.IndirectFlags) != 0 { + p = ptrToPtr(p) + } + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + store(ctxptr, code.Idx, ptrToPtr(p)) + fallthrough + case encoder.OpMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 { + b = append(b, `""`...) + b = appendComma(ctx, b) + code = code.Next + break + } + if (code.Flags&encoder.IsNilableTypeFlags) != 0 && (code.Flags&encoder.IndirectFlags) != 0 { + p = ptrToPtr(p) + } + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpSlicePtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpSlice: + p := load(ctxptr, code.Idx) + slice := ptrToSlice(p) + if p == 0 || slice.Data == nil { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.ElemIdx, 0) + store(ctxptr, code.Length, uintptr(slice.Len)) + store(ctxptr, code.Idx, uintptr(slice.Data)) + if slice.Len > 0 { + b = appendArrayHead(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, uintptr(slice.Data)) + } else { + b = appendEmptyArray(ctx, b) + code = code.End.Next + } + case encoder.OpSliceElem: + idx := load(ctxptr, code.ElemIdx) + length := load(ctxptr, code.Length) + idx++ + if idx < length { + b = appendArrayElemIndent(ctx, code, b) + store(ctxptr, code.ElemIdx, idx) + data := load(ctxptr, code.Idx) + size := uintptr(code.Size) + code = code.Next + store(ctxptr, code.Idx, data+idx*size) + } else { + b = appendArrayEnd(ctx, code, b) + code = code.End.Next + } + case encoder.OpArrayPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpArray: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + if code.Length > 0 { + b = appendArrayHead(ctx, code, b) + store(ctxptr, code.ElemIdx, 0) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + b = appendEmptyArray(ctx, b) + code = code.End.Next + } + case encoder.OpArrayElem: + idx := load(ctxptr, 
code.ElemIdx) + idx++ + if idx < uintptr(code.Length) { + b = appendArrayElemIndent(ctx, code, b) + store(ctxptr, code.ElemIdx, idx) + p := load(ctxptr, code.Idx) + size := uintptr(code.Size) + code = code.Next + store(ctxptr, code.Idx, p+idx*size) + } else { + b = appendArrayEnd(ctx, code, b) + code = code.End.Next + } + case encoder.OpMapPtr: + p := loadNPtr(ctxptr, code.Idx, code.PtrNum) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + store(ctxptr, code.Idx, p) + fallthrough + case encoder.OpMap: + p := load(ctxptr, code.Idx) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.End.Next + break + } + uptr := ptrToUnsafePtr(p) + mlen := maplen(uptr) + if mlen <= 0 { + b = appendEmptyObject(ctx, b) + code = code.End.Next + break + } + b = appendStructHead(ctx, b) + unorderedMap := (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 + mapCtx := encoder.NewMapContext(mlen, unorderedMap) + mapiterinit(code.Type, uptr, &mapCtx.Iter) + store(ctxptr, code.Idx, uintptr(unsafe.Pointer(mapCtx))) + ctx.KeepRefs = append(ctx.KeepRefs, unsafe.Pointer(mapCtx)) + if unorderedMap { + b = appendMapKeyIndent(ctx, code.Next, b) + } else { + mapCtx.Start = len(b) + mapCtx.First = len(b) + } + key := mapiterkey(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(key)) + code = code.Next + case encoder.OpMapKey: + mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx))) + idx := mapCtx.Idx + idx++ + if (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 { + if idx < mapCtx.Len { + b = appendMapKeyIndent(ctx, code, b) + mapCtx.Idx = int(idx) + key := mapiterkey(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(key)) + code = code.Next + } else { + b = appendObjectEnd(ctx, code, b) + encoder.ReleaseMapContext(mapCtx) + code = code.End.Next + } + } else { + mapCtx.Slice.Items[mapCtx.Idx].Value = b[mapCtx.Start:len(b)] + if idx < mapCtx.Len { + mapCtx.Idx = int(idx) + mapCtx.Start = len(b) + key := mapiterkey(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(key)) + code = code.Next + } else { + code = code.End + } + } + case encoder.OpMapValue: + mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx))) + if (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 { + b = appendColon(ctx, b) + } else { + mapCtx.Slice.Items[mapCtx.Idx].Key = b[mapCtx.Start:len(b)] + mapCtx.Start = len(b) + } + value := mapitervalue(&mapCtx.Iter) + store(ctxptr, code.Next.Idx, uintptr(value)) + mapiternext(&mapCtx.Iter) + code = code.Next + case encoder.OpMapEnd: + // this operation only used by sorted map. + mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx))) + sort.Sort(mapCtx.Slice) + buf := mapCtx.Buf + for _, item := range mapCtx.Slice.Items { + buf = appendMapKeyValue(ctx, code, buf, item.Key, item.Value) + } + buf = appendMapEnd(ctx, code, buf) + b = b[:mapCtx.First] + b = append(b, buf...) 
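+ // sorted-map epilogue: the buffered key/value pairs were re-emitted in key order above; retain the scratch buffer for reuse before releasing the map context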
+ mapCtx.Buf = buf + encoder.ReleaseMapContext(mapCtx) + code = code.Next + case encoder.OpRecursivePtr: + p := load(ctxptr, code.Idx) + if p == 0 { + code = code.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpRecursive: + ptr := load(ctxptr, code.Idx) + if ptr != 0 { + if recursiveLevel > encoder.StartDetectingCyclesAfter { + for _, seen := range ctx.SeenPtr { + if ptr == seen { + return nil, errUnsupportedValue(code, ptr) + } + } + } + } + ctx.SeenPtr = append(ctx.SeenPtr, ptr) + c := code.Jmp.Code + curlen := uintptr(len(ctx.Ptrs)) + offsetNum := ptrOffset / uintptrSize + oldOffset := ptrOffset + ptrOffset += code.Jmp.CurLen * uintptrSize + oldBaseIndent := ctx.BaseIndent + indentDiffFromTop := c.Indent - 1 + ctx.BaseIndent += code.Indent - indentDiffFromTop + + newLen := offsetNum + code.Jmp.CurLen + code.Jmp.NextLen + if curlen < newLen { + ctx.Ptrs = append(ctx.Ptrs, make([]uintptr, newLen-curlen)...) + } + ctxptr = ctx.Ptr() + ptrOffset // assign new ctxptr + + store(ctxptr, c.Idx, ptr) + store(ctxptr, c.End.Next.Idx, oldOffset) + store(ctxptr, c.End.Next.ElemIdx, uintptr(unsafe.Pointer(code.Next))) + storeIndent(ctxptr, c.End.Next, uintptr(oldBaseIndent)) + code = c + recursiveLevel++ + case encoder.OpRecursiveEnd: + recursiveLevel-- + + // restore ctxptr + restoreIndent(ctx, code, ctxptr) + offset := load(ctxptr, code.Idx) + ctx.SeenPtr = ctx.SeenPtr[:len(ctx.SeenPtr)-1] + + codePtr := load(ctxptr, code.ElemIdx) + code = (*encoder.Opcode)(ptrToUnsafePtr(codePtr)) + ctxptr = ctx.Ptr() + offset + ptrOffset = offset + case encoder.OpStructPtrHead: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHead: + p := load(ctxptr, code.Idx) + if p == 0 && ((code.Flags&encoder.IndirectFlags) != 0 || code.Next.Op == encoder.OpStructEnd) { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if len(code.Key) > 0 { + if (code.Flags&encoder.IsTaggedKeyFlags) != 0 || code.Flags&encoder.AnonymousKeyFlags == 0 { + b = appendStructKey(ctx, code, b) + } + } + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructPtrHeadOmitEmpty: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmpty: + p := load(ctxptr, code.Idx) + if p == 0 && ((code.Flags&encoder.IndirectFlags) != 0 || code.Next.Op == encoder.OpStructEnd) { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + if p == 0 || (ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0) { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadInt: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } 
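+ // nil struct pointer: null was emitted above (unless this is an embedded head), so skip straight past the struct's end opcode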
+ code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadInt: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyInt: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyInt: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadIntString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadIntString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyIntString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyIntString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + u64 := ptrToUint64(p, code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { 
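+ // the struct itself is nil when accessed indirectly, so the whole value encodes as null and its fields are skipped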
+ if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendInt(ctx, b, p, code) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyIntPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p, code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadUint: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadUint: + p := load(ctxptr, code.Idx) + if p == 0 { + if 
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyUint: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyUint: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadUintString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadUintString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyUintString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyUintString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadUintPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadUintPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags 
== 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendUint(ctx, b, p, code) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyUintPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyUintPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p, code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadUintPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadUintPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyUintPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyUintPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadFloat32: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadFloat32: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if 
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat32: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyFloat32: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat32(p + uintptr(code.Offset)) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadFloat32String: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadFloat32String: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat32String: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyFloat32String: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat32(p + uintptr(code.Offset)) + if v == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadFloat32Ptr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadFloat32Ptr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + 
b = appendFloat32(ctx, b, ptrToFloat32(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat32Ptr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyFloat32Ptr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadFloat32PtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadFloat32PtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat32PtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyFloat32PtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadFloat64: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadFloat64: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, 
b) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat64: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyFloat64: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat64(p + uintptr(code.Offset)) + if v == 0 { + code = code.NextField + } else { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadFloat64String: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadFloat64String: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat64String: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyFloat64String: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToFloat64(p + uintptr(code.Offset)) + if v == 0 { + code = code.NextField + } else { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadFloat64Ptr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadFloat64Ptr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if 
(code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat64Ptr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyFloat64Ptr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadFloat64PtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadFloat64PtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyFloat64PtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyFloat64PtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, 
b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNull(ctx, b) + b = appendComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToString(p + uintptr(code.Offset)) + if v == "" { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadStringString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadStringString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p+uintptr(code.Offset))))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyStringString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyStringString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToString(p + uintptr(code.Offset)) + if v == "" { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, v))) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadStringPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadStringPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + 
code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, ptrToString(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyStringPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyStringPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadStringPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadStringPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyStringPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyStringPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadBool: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadBool: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags 
== 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBool: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyBool: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } else { + code = code.NextField + } + case encoder.OpStructPtrHeadBoolString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadBoolString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBoolString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyBoolString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + } else { + code = code.NextField + } + case encoder.OpStructPtrHeadBoolPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadBoolPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = 
appendNull(ctx, b) + } else { + b = appendBool(ctx, b, ptrToBool(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBoolPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyBoolPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadBoolPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadBoolPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBoolPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyBoolPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadBytes: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadBytes: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code 
= code.Next + case encoder.OpStructPtrHeadOmitEmptyBytes: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyBytes: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToBytes(p + uintptr(code.Offset)) + if len(v) == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadBytesPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadBytesPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendByteSlice(ctx, b, ptrToBytes(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyBytesPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyBytesPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadNumber: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadNumber: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyNumber: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if 
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyNumber: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToNumber(p + uintptr(code.Offset)) + if v == "" { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + } + case encoder.OpStructPtrHeadNumberString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadNumberString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyNumberString: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyNumberString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + v := ptrToNumber(p + uintptr(code.Offset)) + if v == "" { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadNumberPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadNumberPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyNumberPtr: + p := load(ctxptr, code.Idx) + if p == 0 { 
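+ // omitempty here applies to the Number pointer field, not to the struct pointer itself; a nil struct still encodes as null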
+ if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyNumberPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructPtrHeadNumberPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadNumberPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyNumberPtrString: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyNumberPtrString: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructPtrHeadArray, encoder.OpStructPtrHeadSlice: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadArray, encoder.OpStructHeadSlice: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case 
encoder.OpStructPtrHeadOmitEmptyArray: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyArray: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructPtrHeadOmitEmptySlice: + if (code.Flags & encoder.IndirectFlags) != 0 { + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptySlice: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + slice := ptrToSlice(p) + if slice.Len == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadArrayPtr, encoder.OpStructPtrHeadSlicePtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadArrayPtr, encoder.OpStructHeadSlicePtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNullComma(ctx, b) + code = code.NextField + } else { + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadOmitEmptyArrayPtr, encoder.OpStructPtrHeadOmitEmptySlicePtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyArrayPtr, encoder.OpStructHeadOmitEmptySlicePtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadMap: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = 
appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadMap: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if p != 0 && (code.Flags&encoder.IndirectFlags) != 0 { + p = ptrToPtr(p + uintptr(code.Offset)) + } + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructPtrHeadOmitEmptyMap: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyMap: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if p != 0 && (code.Flags&encoder.IndirectFlags) != 0 { + p = ptrToPtr(p + uintptr(code.Offset)) + } + if maplen(ptrToUnsafePtr(p)) == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadMapPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadMapPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.NextField + break + } + p = ptrToPtr(p + uintptr(code.Offset)) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.NextField + } else { + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p, code.PtrNum) + } + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadOmitEmptyMapPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyMapPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if p == 0 { + code = code.NextField + break + } + p = ptrToPtr(p + uintptr(code.Offset)) + if p == 0 { + code = code.NextField + } else { + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p, code.PtrNum) + } + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructPtrHeadMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = 
appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadMarshalJSON { + p = ptrToPtr(p) + } + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyMarshalJSON: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadOmitEmptyMarshalJSON { + p = ptrToPtr(p) + } + } + iface := ptrToInterface(code, p) + if (code.Flags&encoder.NilCheckFlags) != 0 && encoder.IsNilForMarshaler(iface) { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalJSON(ctx, code, b, iface) + if err != nil { + return nil, err + } + b = bb + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyMarshalJSONPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyMarshalJSONPtr: + p 
:= load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if p == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadMarshalText { + p = ptrToPtr(p) + } + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + } + fallthrough + case encoder.OpStructHeadOmitEmptyMarshalText: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + if (code.Flags&encoder.IndirectFlags) != 0 || code.Op == encoder.OpStructPtrHeadOmitEmptyMarshalText { + p = ptrToPtr(p) + } + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructPtrHeadMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if 
code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + b = appendStructKey(ctx, code, b) + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructPtrHeadOmitEmptyMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) + fallthrough + case encoder.OpStructHeadOmitEmptyMarshalTextPtr: + p := load(ctxptr, code.Idx) + if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendNullComma(ctx, b) + } + code = code.End.Next + break + } + if (code.Flags & encoder.IndirectFlags) != 0 { + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + } + if code.Flags&encoder.AnonymousHeadFlags == 0 { + b = appendStructHead(ctx, b) + } + if p == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + b = appendComma(ctx, b) + code = code.Next + } + case encoder.OpStructField: + if code.Flags&encoder.IsTaggedKeyFlags != 0 || code.Flags&encoder.AnonymousKeyFlags == 0 { + b = appendStructKey(ctx, code, b) + } + p := load(ctxptr, code.Idx) + uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmpty: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + if ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructFieldInt: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyInt: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldIntString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyIntString: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldIntPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendInt(ctx, b, p, code) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyIntPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if 
p != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p, code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldIntPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldUint: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyUint: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldUintString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyUintString: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldUintPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendUint(ctx, b, p, code) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyUintPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p, code) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldUintPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyUintPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat32: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat32: + p := load(ctxptr, code.Idx) + v := ptrToFloat32(p + uintptr(code.Offset)) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, v) + b = 
appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat32String: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat32String: + p := load(ctxptr, code.Idx) + v := ptrToFloat32(p + uintptr(code.Offset)) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat32Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendFloat32(ctx, b, ptrToFloat32(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat32Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat32PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat32PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat64: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat64: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if v != 0 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat64String: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat64String: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if v != 0 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat64Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNullComma(ctx, b) + code = code.Next + break + } + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, 
errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat64Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldFloat64PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyFloat64PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyString: + p := load(ctxptr, code.Idx) + v := ptrToString(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldStringString: + p := load(ctxptr, code.Idx) + s := ptrToString(p + uintptr(code.Offset)) + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, s))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyStringString: + p := load(ctxptr, code.Idx) + v := ptrToString(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, v))) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldStringPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, ptrToString(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyStringPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldStringPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyStringPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + b = appendComma(ctx, b) 
+ } + code = code.Next + case encoder.OpStructFieldBool: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBool: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBoolString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBoolString: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, v) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBoolPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendBool(ctx, b, ptrToBool(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBoolPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBoolPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBoolPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBytes: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p+uintptr(code.Offset))) + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBytes: + p := load(ctxptr, code.Idx) + v := ptrToBytes(p + uintptr(code.Offset)) + if len(v) > 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, v) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldBytesPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendByteSlice(ctx, b, ptrToBytes(p)) + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyBytesPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p)) + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldNumber: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case 
encoder.OpStructFieldOmitEmptyNumber: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldNumberString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyNumberString: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldNumberPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyNumberPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldNumberPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + b = appendStructKey(ctx, code, b) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyNumberPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendComma(ctx, b) + } + code = code.Next + case encoder.OpStructFieldMarshalJSON: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalJSON: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + code = code.NextField + break + } + iface := ptrToInterface(code, p) + if (code.Flags&encoder.NilCheckFlags) != 0 && encoder.IsNilForMarshaler(iface) { + code = code.NextField + break + } + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalJSON(ctx, code, b, iface) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpStructFieldMarshalJSONPtr: + p := load(ctxptr, code.Idx) + b = 
appendStructKey(ctx, code, b) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalJSONPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldMarshalText: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalText: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + if (code.Flags & encoder.IsNilableTypeFlags) != 0 { + p = ptrToPtr(p) + } + if p == 0 && (code.Flags&encoder.NilCheckFlags) != 0 { + code = code.NextField + break + } + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + code = code.Next + case encoder.OpStructFieldMarshalTextPtr: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendComma(ctx, b) + code = code.Next + case encoder.OpStructFieldOmitEmptyMarshalTextPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) + if err != nil { + return nil, err + } + b = appendComma(ctx, bb) + } + code = code.Next + case encoder.OpStructFieldArray: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyArray: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldArrayPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyArrayPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + code = code.NextField + } + case encoder.OpStructFieldSlice: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptySlice: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + slice := ptrToSlice(p) + if slice.Len == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code 
= code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructFieldSlicePtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptySlicePtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + code = code.NextField + } + case encoder.OpStructFieldMap: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + uintptr(code.Offset)) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyMap: + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + uintptr(code.Offset)) + if p == 0 || maplen(ptrToUnsafePtr(p)) == 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructFieldMapPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + uintptr(code.Offset)) + if p != 0 { + p = ptrToNPtr(p, code.PtrNum) + } + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyMapPtr: + p := load(ctxptr, code.Idx) + p = ptrToPtr(p + uintptr(code.Offset)) + if p != 0 { + p = ptrToNPtr(p, code.PtrNum) + } + if p != 0 { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } else { + code = code.NextField + } + case encoder.OpStructFieldStruct: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + code = code.Next + store(ctxptr, code.Idx, p) + case encoder.OpStructFieldOmitEmptyStruct: + p := load(ctxptr, code.Idx) + p += uintptr(code.Offset) + if ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0 { + code = code.NextField + } else { + b = appendStructKey(ctx, code, b) + code = code.Next + store(ctxptr, code.Idx, p) + } + case encoder.OpStructEnd: + b = appendStructEndSkipLast(ctx, code, b) + code = code.Next + case encoder.OpStructEndInt: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyInt: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndIntString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyIntString: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndIntPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p 
== 0 { + b = appendNull(ctx, b) + } else { + b = appendInt(ctx, b, p, code) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyIntPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendInt(ctx, b, p, code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndIntPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyIntPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendInt(ctx, b, p, code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUint: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUint: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUintString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUintString: + p := load(ctxptr, code.Idx) + u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) + v := u64 & ((1 << code.NumBitSize) - 1) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p+uintptr(code.Offset), code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUintPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendUint(ctx, b, p, code) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUintPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendUint(ctx, b, p, code) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndUintPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyUintPtrString: + p := load(ctxptr, code.Idx) + p = 
ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendUint(ctx, b, p, code) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32: + p := load(ctxptr, code.Idx) + v := ptrToFloat32(p + uintptr(code.Offset)) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32String: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32String: + p := load(ctxptr, code.Idx) + v := ptrToFloat32(p + uintptr(code.Offset)) + if v != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32Ptr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendFloat32(ctx, b, ptrToFloat32(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat32PtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat32PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat32(ctx, b, ptrToFloat32(p)) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat64: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if v != 0 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = 
code.Next + case encoder.OpStructEndFloat64String: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64String: + p := load(ctxptr, code.Idx) + v := ptrToFloat64(p + uintptr(code.Offset)) + if v != 0 { + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat64Ptr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + b = appendStructEnd(ctx, code, b) + code = code.Next + break + } + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64Ptr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndFloat64PtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = appendFloat64(ctx, b, v) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyFloat64PtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + v := ptrToFloat64(p) + if math.IsInf(v, 0) || math.IsNaN(v) { + return nil, errUnsupportedFloat(v) + } + b = append(b, '"') + b = appendFloat64(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyString: + p := load(ctxptr, code.Idx) + v := ptrToString(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndStringString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + s := ptrToString(p + uintptr(code.Offset)) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, s))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyStringString: + p := load(ctxptr, code.Idx) + v := ptrToString(p + 
uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, v))) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndStringPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, ptrToString(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyStringPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, ptrToString(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndStringPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyStringPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendString(ctx, b, string(appendString(ctx, []byte{}, ptrToString(p)))) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBool: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBool: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBoolString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p+uintptr(code.Offset))) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBoolString: + p := load(ctxptr, code.Idx) + v := ptrToBool(p + uintptr(code.Offset)) + if v { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, v) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBoolPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendBool(ctx, b, ptrToBool(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBoolPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendBool(ctx, b, ptrToBool(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBoolPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 
0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBoolPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + b = appendBool(ctx, b, ptrToBool(p)) + b = append(b, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBytes: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p+uintptr(code.Offset))) + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBytes: + p := load(ctxptr, code.Idx) + v := ptrToBytes(p + uintptr(code.Offset)) + if len(v) > 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, v) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndBytesPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = appendByteSlice(ctx, b, ptrToBytes(p)) + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyBytesPtr: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = appendByteSlice(ctx, b, ptrToBytes(p)) + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndNumber: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = appendStructEnd(ctx, code, bb) + code = code.Next + case encoder.OpStructEndOmitEmptyNumber: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = appendStructEnd(ctx, code, bb) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndNumberString: + p := load(ctxptr, code.Idx) + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p+uintptr(code.Offset))) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyNumberString: + p := load(ctxptr, code.Idx) + v := ptrToNumber(p + uintptr(code.Offset)) + if v != "" { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, v) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndNumberPtr: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = bb + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyNumberPtr: + p := load(ctxptr, code.Idx) + p = 
ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = appendStructEnd(ctx, code, bb) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpStructEndNumberPtrString: + b = appendStructKey(ctx, code, b) + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p == 0 { + b = appendNull(ctx, b) + } else { + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + } + b = appendStructEnd(ctx, code, b) + code = code.Next + case encoder.OpStructEndOmitEmptyNumberPtrString: + p := load(ctxptr, code.Idx) + p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) + if p != 0 { + b = appendStructKey(ctx, code, b) + b = append(b, '"') + bb, err := appendNumber(ctx, b, ptrToNumber(p)) + if err != nil { + return nil, err + } + b = append(bb, '"') + b = appendStructEnd(ctx, code, b) + } else { + b = appendStructEndSkipLast(ctx, code, b) + } + code = code.Next + case encoder.OpEnd: + goto END + } + } +END: + return b, nil +} diff --git a/vendor/github.com/goccy/go-json/internal/errors/error.go b/vendor/github.com/goccy/go-json/internal/errors/error.go new file mode 100644 index 0000000000000..9207d0ff25e35 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/errors/error.go @@ -0,0 +1,183 @@ +package errors + +import ( + "fmt" + "reflect" + "strconv" +) + +type InvalidUTF8Error struct { + S string // the whole string value that caused the error +} + +func (e *InvalidUTF8Error) Error() string { + return fmt.Sprintf("json: invalid UTF-8 in string: %s", strconv.Quote(e.S)) +} + +type InvalidUnmarshalError struct { + Type reflect.Type +} + +func (e *InvalidUnmarshalError) Error() string { + if e.Type == nil { + return "json: Unmarshal(nil)" + } + + if e.Type.Kind() != reflect.Ptr { + return fmt.Sprintf("json: Unmarshal(non-pointer %s)", e.Type) + } + return fmt.Sprintf("json: Unmarshal(nil %s)", e.Type) +} + +// A MarshalerError represents an error from calling a MarshalJSON or MarshalText method. +type MarshalerError struct { + Type reflect.Type + Err error + sourceFunc string +} + +func (e *MarshalerError) Error() string { + srcFunc := e.sourceFunc + if srcFunc == "" { + srcFunc = "MarshalJSON" + } + return fmt.Sprintf("json: error calling %s for type %s: %s", srcFunc, e.Type, e.Err.Error()) +} + +// Unwrap returns the underlying error. +func (e *MarshalerError) Unwrap() error { return e.Err } + +// A SyntaxError is a description of a JSON syntax error. +type SyntaxError struct { + msg string // description of error + Offset int64 // error occurred after reading Offset bytes +} + +func (e *SyntaxError) Error() string { return e.msg } + +// An UnmarshalFieldError describes a JSON object key that +// led to an unexported (and therefore unwritable) struct field. +// +// Deprecated: No longer used; kept for compatibility. +type UnmarshalFieldError struct { + Key string + Type reflect.Type + Field reflect.StructField +} + +func (e *UnmarshalFieldError) Error() string { + return fmt.Sprintf("json: cannot unmarshal object key %s into unexported field %s of type %s", + strconv.Quote(e.Key), e.Field.Name, e.Type.String(), + ) +} + +// An UnmarshalTypeError describes a JSON value that was +// not appropriate for a value of a specific Go type. 
+type UnmarshalTypeError struct { + Value string // description of JSON value - "bool", "array", "number -5" + Type reflect.Type // type of Go value it could not be assigned to + Offset int64 // error occurred after reading Offset bytes + Struct string // name of the struct type containing the field + Field string // the full path from root node to the field +} + +func (e *UnmarshalTypeError) Error() string { + if e.Struct != "" || e.Field != "" { + return fmt.Sprintf("json: cannot unmarshal %s into Go struct field %s.%s of type %s", + e.Value, e.Struct, e.Field, e.Type, + ) + } + return fmt.Sprintf("json: cannot unmarshal %s into Go value of type %s", e.Value, e.Type) +} + +// An UnsupportedTypeError is returned by Marshal when attempting +// to encode an unsupported value type. +type UnsupportedTypeError struct { + Type reflect.Type +} + +func (e *UnsupportedTypeError) Error() string { + return fmt.Sprintf("json: unsupported type: %s", e.Type) +} + +type UnsupportedValueError struct { + Value reflect.Value + Str string +} + +func (e *UnsupportedValueError) Error() string { + return fmt.Sprintf("json: unsupported value: %s", e.Str) +} + +func ErrSyntax(msg string, offset int64) *SyntaxError { + return &SyntaxError{msg: msg, Offset: offset} +} + +func ErrMarshaler(typ reflect.Type, err error, msg string) *MarshalerError { + return &MarshalerError{ + Type: typ, + Err: err, + sourceFunc: msg, + } +} + +func ErrExceededMaxDepth(c byte, cursor int64) *SyntaxError { + return &SyntaxError{ + msg: fmt.Sprintf(`invalid character "%c" exceeded max depth`, c), + Offset: cursor, + } +} + +func ErrNotAtBeginningOfValue(cursor int64) *SyntaxError { + return &SyntaxError{msg: "not at beginning of value", Offset: cursor} +} + +func ErrUnexpectedEndOfJSON(msg string, cursor int64) *SyntaxError { + return &SyntaxError{ + msg: fmt.Sprintf("json: %s unexpected end of JSON input", msg), + Offset: cursor, + } +} + +func ErrExpected(msg string, cursor int64) *SyntaxError { + return &SyntaxError{msg: fmt.Sprintf("expected %s", msg), Offset: cursor} +} + +func ErrInvalidCharacter(c byte, context string, cursor int64) *SyntaxError { + if c == 0 { + return &SyntaxError{ + msg: fmt.Sprintf("json: invalid character as %s", context), + Offset: cursor, + } + } + return &SyntaxError{ + msg: fmt.Sprintf("json: invalid character %c as %s", c, context), + Offset: cursor, + } +} + +func ErrInvalidBeginningOfValue(c byte, cursor int64) *SyntaxError { + return &SyntaxError{ + msg: fmt.Sprintf("invalid character '%c' looking for beginning of value", c), + Offset: cursor, + } +} + +type PathError struct { + msg string +} + +func (e *PathError) Error() string { + return fmt.Sprintf("json: invalid path format: %s", e.msg) +} + +func ErrInvalidPath(msg string, args ...interface{}) *PathError { + if len(args) != 0 { + return &PathError{msg: fmt.Sprintf(msg, args...)} + } + return &PathError{msg: msg} +} + +func ErrEmptyPath() *PathError { + return &PathError{msg: "path is empty"} +} diff --git a/vendor/github.com/goccy/go-json/internal/runtime/rtype.go b/vendor/github.com/goccy/go-json/internal/runtime/rtype.go new file mode 100644 index 0000000000000..37cfe35a1fd38 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/runtime/rtype.go @@ -0,0 +1,262 @@ +package runtime + +import ( + "reflect" + "unsafe" +) + +// Type representing reflect.rtype for noescape trick +type Type struct{} + +//go:linkname rtype_Align reflect.(*rtype).Align +//go:noescape +func rtype_Align(*Type) int + +func (t *Type) Align() int { + return 
rtype_Align(t) +} + +//go:linkname rtype_FieldAlign reflect.(*rtype).FieldAlign +//go:noescape +func rtype_FieldAlign(*Type) int + +func (t *Type) FieldAlign() int { + return rtype_FieldAlign(t) +} + +//go:linkname rtype_Method reflect.(*rtype).Method +//go:noescape +func rtype_Method(*Type, int) reflect.Method + +func (t *Type) Method(a0 int) reflect.Method { + return rtype_Method(t, a0) +} + +//go:linkname rtype_MethodByName reflect.(*rtype).MethodByName +//go:noescape +func rtype_MethodByName(*Type, string) (reflect.Method, bool) + +func (t *Type) MethodByName(a0 string) (reflect.Method, bool) { + return rtype_MethodByName(t, a0) +} + +//go:linkname rtype_NumMethod reflect.(*rtype).NumMethod +//go:noescape +func rtype_NumMethod(*Type) int + +func (t *Type) NumMethod() int { + return rtype_NumMethod(t) +} + +//go:linkname rtype_Name reflect.(*rtype).Name +//go:noescape +func rtype_Name(*Type) string + +func (t *Type) Name() string { + return rtype_Name(t) +} + +//go:linkname rtype_PkgPath reflect.(*rtype).PkgPath +//go:noescape +func rtype_PkgPath(*Type) string + +func (t *Type) PkgPath() string { + return rtype_PkgPath(t) +} + +//go:linkname rtype_Size reflect.(*rtype).Size +//go:noescape +func rtype_Size(*Type) uintptr + +func (t *Type) Size() uintptr { + return rtype_Size(t) +} + +//go:linkname rtype_String reflect.(*rtype).String +//go:noescape +func rtype_String(*Type) string + +func (t *Type) String() string { + return rtype_String(t) +} + +//go:linkname rtype_Kind reflect.(*rtype).Kind +//go:noescape +func rtype_Kind(*Type) reflect.Kind + +func (t *Type) Kind() reflect.Kind { + return rtype_Kind(t) +} + +//go:linkname rtype_Implements reflect.(*rtype).Implements +//go:noescape +func rtype_Implements(*Type, reflect.Type) bool + +func (t *Type) Implements(u reflect.Type) bool { + return rtype_Implements(t, u) +} + +//go:linkname rtype_AssignableTo reflect.(*rtype).AssignableTo +//go:noescape +func rtype_AssignableTo(*Type, reflect.Type) bool + +func (t *Type) AssignableTo(u reflect.Type) bool { + return rtype_AssignableTo(t, u) +} + +//go:linkname rtype_ConvertibleTo reflect.(*rtype).ConvertibleTo +//go:noescape +func rtype_ConvertibleTo(*Type, reflect.Type) bool + +func (t *Type) ConvertibleTo(u reflect.Type) bool { + return rtype_ConvertibleTo(t, u) +} + +//go:linkname rtype_Comparable reflect.(*rtype).Comparable +//go:noescape +func rtype_Comparable(*Type) bool + +func (t *Type) Comparable() bool { + return rtype_Comparable(t) +} + +//go:linkname rtype_Bits reflect.(*rtype).Bits +//go:noescape +func rtype_Bits(*Type) int + +func (t *Type) Bits() int { + return rtype_Bits(t) +} + +//go:linkname rtype_ChanDir reflect.(*rtype).ChanDir +//go:noescape +func rtype_ChanDir(*Type) reflect.ChanDir + +func (t *Type) ChanDir() reflect.ChanDir { + return rtype_ChanDir(t) +} + +//go:linkname rtype_IsVariadic reflect.(*rtype).IsVariadic +//go:noescape +func rtype_IsVariadic(*Type) bool + +func (t *Type) IsVariadic() bool { + return rtype_IsVariadic(t) +} + +//go:linkname rtype_Elem reflect.(*rtype).Elem +//go:noescape +func rtype_Elem(*Type) reflect.Type + +func (t *Type) Elem() *Type { + return Type2RType(rtype_Elem(t)) +} + +//go:linkname rtype_Field reflect.(*rtype).Field +//go:noescape +func rtype_Field(*Type, int) reflect.StructField + +func (t *Type) Field(i int) reflect.StructField { + return rtype_Field(t, i) +} + +//go:linkname rtype_FieldByIndex reflect.(*rtype).FieldByIndex +//go:noescape +func rtype_FieldByIndex(*Type, []int) reflect.StructField + +func (t *Type) FieldByIndex(index 
[]int) reflect.StructField { + return rtype_FieldByIndex(t, index) +} + +//go:linkname rtype_FieldByName reflect.(*rtype).FieldByName +//go:noescape +func rtype_FieldByName(*Type, string) (reflect.StructField, bool) + +func (t *Type) FieldByName(name string) (reflect.StructField, bool) { + return rtype_FieldByName(t, name) +} + +//go:linkname rtype_FieldByNameFunc reflect.(*rtype).FieldByNameFunc +//go:noescape +func rtype_FieldByNameFunc(*Type, func(string) bool) (reflect.StructField, bool) + +func (t *Type) FieldByNameFunc(match func(string) bool) (reflect.StructField, bool) { + return rtype_FieldByNameFunc(t, match) +} + +//go:linkname rtype_In reflect.(*rtype).In +//go:noescape +func rtype_In(*Type, int) reflect.Type + +func (t *Type) In(i int) reflect.Type { + return rtype_In(t, i) +} + +//go:linkname rtype_Key reflect.(*rtype).Key +//go:noescape +func rtype_Key(*Type) reflect.Type + +func (t *Type) Key() *Type { + return Type2RType(rtype_Key(t)) +} + +//go:linkname rtype_Len reflect.(*rtype).Len +//go:noescape +func rtype_Len(*Type) int + +func (t *Type) Len() int { + return rtype_Len(t) +} + +//go:linkname rtype_NumField reflect.(*rtype).NumField +//go:noescape +func rtype_NumField(*Type) int + +func (t *Type) NumField() int { + return rtype_NumField(t) +} + +//go:linkname rtype_NumIn reflect.(*rtype).NumIn +//go:noescape +func rtype_NumIn(*Type) int + +func (t *Type) NumIn() int { + return rtype_NumIn(t) +} + +//go:linkname rtype_NumOut reflect.(*rtype).NumOut +//go:noescape +func rtype_NumOut(*Type) int + +func (t *Type) NumOut() int { + return rtype_NumOut(t) +} + +//go:linkname rtype_Out reflect.(*rtype).Out +//go:noescape +func rtype_Out(*Type, int) reflect.Type + +//go:linkname PtrTo reflect.(*rtype).ptrTo +//go:noescape +func PtrTo(*Type) *Type + +func (t *Type) Out(i int) reflect.Type { + return rtype_Out(t, i) +} + +//go:linkname IfaceIndir reflect.ifaceIndir +//go:noescape +func IfaceIndir(*Type) bool + +//go:linkname RType2Type reflect.toType +//go:noescape +func RType2Type(t *Type) reflect.Type + +type emptyInterface struct { + _ *Type + ptr unsafe.Pointer +} + +func Type2RType(t reflect.Type) *Type { + return (*Type)(((*emptyInterface)(unsafe.Pointer(&t))).ptr) +} diff --git a/vendor/github.com/goccy/go-json/internal/runtime/struct_field.go b/vendor/github.com/goccy/go-json/internal/runtime/struct_field.go new file mode 100644 index 0000000000000..baab0c5978d33 --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/runtime/struct_field.go @@ -0,0 +1,91 @@ +package runtime + +import ( + "reflect" + "strings" + "unicode" +) + +func getTag(field reflect.StructField) string { + return field.Tag.Get("json") +} + +func IsIgnoredStructField(field reflect.StructField) bool { + if field.PkgPath != "" { + if field.Anonymous { + t := field.Type + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + if t.Kind() != reflect.Struct { + return true + } + } else { + // private field + return true + } + } + tag := getTag(field) + return tag == "-" +} + +type StructTag struct { + Key string + IsTaggedKey bool + IsOmitEmpty bool + IsString bool + Field reflect.StructField +} + +type StructTags []*StructTag + +func (t StructTags) ExistsKey(key string) bool { + for _, tt := range t { + if tt.Key == key { + return true + } + } + return false +} + +func isValidTag(s string) bool { + if s == "" { + return false + } + for _, c := range s { + switch { + case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c): + // Backslash and quote chars are reserved, but + // otherwise any punctuation 
chars are allowed + // in a tag name. + case !unicode.IsLetter(c) && !unicode.IsDigit(c): + return false + } + } + return true +} + +func StructTagFromField(field reflect.StructField) *StructTag { + keyName := field.Name + tag := getTag(field) + st := &StructTag{Field: field} + opts := strings.Split(tag, ",") + if len(opts) > 0 { + if opts[0] != "" && isValidTag(opts[0]) { + keyName = opts[0] + st.IsTaggedKey = true + } + } + st.Key = keyName + if len(opts) > 1 { + for _, opt := range opts[1:] { + switch opt { + case "omitempty": + st.IsOmitEmpty = true + case "string": + st.IsString = true + } + } + } + return st +} diff --git a/vendor/github.com/goccy/go-json/internal/runtime/type.go b/vendor/github.com/goccy/go-json/internal/runtime/type.go new file mode 100644 index 0000000000000..0167cd2c0183d --- /dev/null +++ b/vendor/github.com/goccy/go-json/internal/runtime/type.go @@ -0,0 +1,100 @@ +package runtime + +import ( + "reflect" + "unsafe" +) + +type SliceHeader struct { + Data unsafe.Pointer + Len int + Cap int +} + +const ( + maxAcceptableTypeAddrRange = 1024 * 1024 * 2 // 2 Mib +) + +type TypeAddr struct { + BaseTypeAddr uintptr + MaxTypeAddr uintptr + AddrRange uintptr + AddrShift uintptr +} + +var ( + typeAddr *TypeAddr + alreadyAnalyzed bool +) + +//go:linkname typelinks reflect.typelinks +func typelinks() ([]unsafe.Pointer, [][]int32) + +//go:linkname rtypeOff reflect.rtypeOff +func rtypeOff(unsafe.Pointer, int32) unsafe.Pointer + +func AnalyzeTypeAddr() *TypeAddr { + defer func() { + alreadyAnalyzed = true + }() + if alreadyAnalyzed { + return typeAddr + } + sections, offsets := typelinks() + if len(sections) != 1 { + return nil + } + if len(offsets) != 1 { + return nil + } + section := sections[0] + offset := offsets[0] + var ( + min uintptr = uintptr(^uint(0)) + max uintptr = 0 + isAligned64 = true + isAligned32 = true + ) + for i := 0; i < len(offset); i++ { + typ := (*Type)(rtypeOff(section, offset[i])) + addr := uintptr(unsafe.Pointer(typ)) + if min > addr { + min = addr + } + if max < addr { + max = addr + } + if typ.Kind() == reflect.Ptr { + addr = uintptr(unsafe.Pointer(typ.Elem())) + if min > addr { + min = addr + } + if max < addr { + max = addr + } + } + isAligned64 = isAligned64 && (addr-min)&63 == 0 + isAligned32 = isAligned32 && (addr-min)&31 == 0 + } + addrRange := max - min + if addrRange == 0 { + return nil + } + var addrShift uintptr + if isAligned64 { + addrShift = 6 + } else if isAligned32 { + addrShift = 5 + } + cacheSize := addrRange >> addrShift + if cacheSize > maxAcceptableTypeAddrRange { + return nil + } + typeAddr = &TypeAddr{ + BaseTypeAddr: min, + MaxTypeAddr: max, + AddrRange: addrRange, + AddrShift: addrShift, + } + return typeAddr +} diff --git a/vendor/github.com/goccy/go-json/json.go b/vendor/github.com/goccy/go-json/json.go new file mode 100644 index 0000000000000..fb18065a23129 --- /dev/null +++ b/vendor/github.com/goccy/go-json/json.go @@ -0,0 +1,368 @@ +package json + +import ( + "bytes" + "context" + "encoding/json" + + "github.com/goccy/go-json/internal/encoder" +) + +// Marshaler is the interface implemented by types that +// can marshal themselves into valid JSON. +type Marshaler interface { + MarshalJSON() ([]byte, error) +} + +// MarshalerContext is the interface implemented by types that +// can marshal themselves into valid JSON with context.Context. 
+type MarshalerContext interface { + MarshalJSON(context.Context) ([]byte, error) +} + +// Unmarshaler is the interface implemented by types +// that can unmarshal a JSON description of themselves. +// The input can be assumed to be a valid encoding of +// a JSON value. UnmarshalJSON must copy the JSON data +// if it wishes to retain the data after returning. +// +// By convention, to approximate the behavior of Unmarshal itself, +// Unmarshalers implement UnmarshalJSON([]byte("null")) as a no-op. +type Unmarshaler interface { + UnmarshalJSON([]byte) error +} + +// UnmarshalerContext is the interface implemented by types +// that can unmarshal with context.Context a JSON description of themselves. +type UnmarshalerContext interface { + UnmarshalJSON(context.Context, []byte) error +} + +// Marshal returns the JSON encoding of v. +// +// Marshal traverses the value v recursively. +// If an encountered value implements the Marshaler interface +// and is not a nil pointer, Marshal calls its MarshalJSON method +// to produce JSON. If no MarshalJSON method is present but the +// value implements encoding.TextMarshaler instead, Marshal calls +// its MarshalText method and encodes the result as a JSON string. +// The nil pointer exception is not strictly necessary +// but mimics a similar, necessary exception in the behavior of +// UnmarshalJSON. +// +// Otherwise, Marshal uses the following type-dependent default encodings: +// +// Boolean values encode as JSON booleans. +// +// Floating point, integer, and Number values encode as JSON numbers. +// +// String values encode as JSON strings coerced to valid UTF-8, +// replacing invalid bytes with the Unicode replacement rune. +// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e" +// to keep some browsers from misinterpreting JSON output as HTML. +// Ampersand "&" is also escaped to "\u0026" for the same reason. +// This escaping can be disabled using an Encoder that had SetEscapeHTML(false) +// called on it. +// +// Array and slice values encode as JSON arrays, except that +// []byte encodes as a base64-encoded string, and a nil slice +// encodes as the null JSON value. +// +// Struct values encode as JSON objects. +// Each exported struct field becomes a member of the object, using the +// field name as the object key, unless the field is omitted for one of the +// reasons given below. +// +// The encoding of each struct field can be customized by the format string +// stored under the "json" key in the struct field's tag. +// The format string gives the name of the field, possibly followed by a +// comma-separated list of options. The name may be empty in order to +// specify options without overriding the default field name. +// +// The "omitempty" option specifies that the field should be omitted +// from the encoding if the field has an empty value, defined as +// false, 0, a nil pointer, a nil interface value, and any empty array, +// slice, map, or string. +// +// As a special case, if the field tag is "-", the field is always omitted. +// Note that a field with name "-" can still be generated using the tag "-,". +// +// Examples of struct field tags and their meanings: +// +// // Field appears in JSON as key "myName". +// Field int `json:"myName"` +// +// // Field appears in JSON as key "myName" and +// // the field is omitted from the object if its value is empty, +// // as defined above. 
+// Field int `json:"myName,omitempty"` +// +// // Field appears in JSON as key "Field" (the default), but +// // the field is skipped if empty. +// // Note the leading comma. +// Field int `json:",omitempty"` +// +// // Field is ignored by this package. +// Field int `json:"-"` +// +// // Field appears in JSON as key "-". +// Field int `json:"-,"` +// +// The "string" option signals that a field is stored as JSON inside a +// JSON-encoded string. It applies only to fields of string, floating point, +// integer, or boolean types. This extra level of encoding is sometimes used +// when communicating with JavaScript programs: +// +// Int64String int64 `json:",string"` +// +// The key name will be used if it's a non-empty string consisting of +// only Unicode letters, digits, and ASCII punctuation except quotation +// marks, backslash, and comma. +// +// Anonymous struct fields are usually marshaled as if their inner exported fields +// were fields in the outer struct, subject to the usual Go visibility rules amended +// as described in the next paragraph. +// An anonymous struct field with a name given in its JSON tag is treated as +// having that name, rather than being anonymous. +// An anonymous struct field of interface type is treated the same as having +// that type as its name, rather than being anonymous. +// +// The Go visibility rules for struct fields are amended for JSON when +// deciding which field to marshal or unmarshal. If there are +// multiple fields at the same level, and that level is the least +// nested (and would therefore be the nesting level selected by the +// usual Go rules), the following extra rules apply: +// +// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered, +// even if there are multiple untagged fields that would otherwise conflict. +// +// 2) If there is exactly one field (tagged or not according to the first rule), that is selected. +// +// 3) Otherwise there are multiple fields, and all are ignored; no error occurs. +// +// Handling of anonymous struct fields is new in Go 1.1. +// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of +// an anonymous struct field in both current and earlier versions, give the field +// a JSON tag of "-". +// +// Map values encode as JSON objects. The map's key type must either be a +// string, an integer type, or implement encoding.TextMarshaler. The map keys +// are sorted and used as JSON object keys by applying the following rules, +// subject to the UTF-8 coercion described for string values above: +// - string keys are used directly +// - encoding.TextMarshalers are marshaled +// - integer keys are converted to strings +// +// Pointer values encode as the value pointed to. +// A nil pointer encodes as the null JSON value. +// +// Interface values encode as the value contained in the interface. +// A nil interface value encodes as the null JSON value. +// +// Channel, complex, and function values cannot be encoded in JSON. +// Attempting to encode such a value causes Marshal to return +// an UnsupportedTypeError. +// +// JSON cannot represent cyclic data structures and Marshal does not +// handle them. Passing cyclic structures to Marshal will result in +// an infinite recursion. +func Marshal(v interface{}) ([]byte, error) { + return MarshalWithOption(v) +} + +// MarshalNoEscape returns the JSON encoding of v and doesn't escape v. 
+func MarshalNoEscape(v interface{}) ([]byte, error) { + return marshalNoEscape(v) +} + +// MarshalContext returns the JSON encoding of v with context.Context and EncodeOption. +func MarshalContext(ctx context.Context, v interface{}, optFuncs ...EncodeOptionFunc) ([]byte, error) { + return marshalContext(ctx, v, optFuncs...) +} + +// MarshalWithOption returns the JSON encoding of v with EncodeOption. +func MarshalWithOption(v interface{}, optFuncs ...EncodeOptionFunc) ([]byte, error) { + return marshal(v, optFuncs...) +} + +// MarshalIndent is like Marshal but applies Indent to format the output. +// Each JSON element in the output will begin on a new line beginning with prefix +// followed by one or more copies of indent according to the indentation nesting. +func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + return MarshalIndentWithOption(v, prefix, indent) +} + +// MarshalIndentWithOption is like Marshal but applies Indent to format the output with EncodeOption. +func MarshalIndentWithOption(v interface{}, prefix, indent string, optFuncs ...EncodeOptionFunc) ([]byte, error) { + return marshalIndent(v, prefix, indent, optFuncs...) +} + +// Unmarshal parses the JSON-encoded data and stores the result +// in the value pointed to by v. If v is nil or not a pointer, +// Unmarshal returns an InvalidUnmarshalError. +// +// Unmarshal uses the inverse of the encodings that +// Marshal uses, allocating maps, slices, and pointers as necessary, +// with the following additional rules: +// +// To unmarshal JSON into a pointer, Unmarshal first handles the case of +// the JSON being the JSON literal null. In that case, Unmarshal sets +// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into +// the value pointed at by the pointer. If the pointer is nil, Unmarshal +// allocates a new value for it to point to. +// +// To unmarshal JSON into a value implementing the Unmarshaler interface, +// Unmarshal calls that value's UnmarshalJSON method, including +// when the input is a JSON null. +// Otherwise, if the value implements encoding.TextUnmarshaler +// and the input is a JSON quoted string, Unmarshal calls that value's +// UnmarshalText method with the unquoted form of the string. +// +// To unmarshal JSON into a struct, Unmarshal matches incoming object +// keys to the keys used by Marshal (either the struct field name or its tag), +// preferring an exact match but also accepting a case-insensitive match. By +// default, object keys which don't have a corresponding struct field are +// ignored (see Decoder.DisallowUnknownFields for an alternative). +// +// To unmarshal JSON into an interface value, +// Unmarshal stores one of these in the interface value: +// +// bool, for JSON booleans +// float64, for JSON numbers +// string, for JSON strings +// []interface{}, for JSON arrays +// map[string]interface{}, for JSON objects +// nil for JSON null +// +// To unmarshal a JSON array into a slice, Unmarshal resets the slice length +// to zero and then appends each element to the slice. +// As a special case, to unmarshal an empty JSON array into a slice, +// Unmarshal replaces the slice with a new empty slice. +// +// To unmarshal a JSON array into a Go array, Unmarshal decodes +// JSON array elements into corresponding Go array elements. +// If the Go array is smaller than the JSON array, +// the additional JSON array elements are discarded. +// If the JSON array is smaller than the Go array, +// the additional Go array elements are set to zero values. 
+// +// To unmarshal a JSON object into a map, Unmarshal first establishes a map to +// use. If the map is nil, Unmarshal allocates a new map. Otherwise Unmarshal +// reuses the existing map, keeping existing entries. Unmarshal then stores +// key-value pairs from the JSON object into the map. The map's key type must +// either be any string type, an integer, implement json.Unmarshaler, or +// implement encoding.TextUnmarshaler. +// +// If a JSON value is not appropriate for a given target type, +// or if a JSON number overflows the target type, Unmarshal +// skips that field and completes the unmarshaling as best it can. +// If no more serious errors are encountered, Unmarshal returns +// an UnmarshalTypeError describing the earliest such error. In any +// case, it's not guaranteed that all the remaining fields following +// the problematic one will be unmarshaled into the target object. +// +// The JSON null value unmarshals into an interface, map, pointer, or slice +// by setting that Go value to nil. Because null is often used in JSON to mean +// “not present,” unmarshaling a JSON null into any other Go type has no effect +// on the value and produces no error. +// +// When unmarshaling quoted strings, invalid UTF-8 or +// invalid UTF-16 surrogate pairs are not treated as an error. +// Instead, they are replaced by the Unicode replacement +// character U+FFFD. +func Unmarshal(data []byte, v interface{}) error { + return unmarshal(data, v) +} + +// UnmarshalContext parses the JSON-encoded data and stores the result +// in the value pointed to by v. If you implement the UnmarshalerContext interface, +// call it with ctx as an argument. +func UnmarshalContext(ctx context.Context, data []byte, v interface{}, optFuncs ...DecodeOptionFunc) error { + return unmarshalContext(ctx, data, v) +} + +func UnmarshalWithOption(data []byte, v interface{}, optFuncs ...DecodeOptionFunc) error { + return unmarshal(data, v, optFuncs...) +} + +func UnmarshalNoEscape(data []byte, v interface{}, optFuncs ...DecodeOptionFunc) error { + return unmarshalNoEscape(data, v, optFuncs...) +} + +// A Token holds a value of one of these types: +// +// Delim, for the four JSON delimiters [ ] { } +// bool, for JSON booleans +// float64, for JSON numbers +// Number, for JSON numbers +// string, for JSON string literals +// nil, for JSON null +type Token = json.Token + +// A Number represents a JSON number literal. +type Number = json.Number + +// RawMessage is a raw encoded JSON value. +// It implements Marshaler and Unmarshaler and can +// be used to delay JSON decoding or precompute a JSON encoding. +type RawMessage = json.RawMessage + +// A Delim is a JSON array or object delimiter, one of [ ] { or }. +type Delim = json.Delim + +// Compact appends to dst the JSON-encoded src with +// insignificant space characters elided. +func Compact(dst *bytes.Buffer, src []byte) error { + return encoder.Compact(dst, src, false) +} + +// Indent appends to dst an indented form of the JSON-encoded src. +// Each element in a JSON object or array begins on a new, +// indented line beginning with prefix followed by one or more +// copies of indent according to the indentation nesting. +// The data appended to dst does not begin with the prefix nor +// any indentation, to make it easier to embed inside other formatted JSON data. +// Although leading space characters (space, tab, carriage return, newline) +// at the beginning of src are dropped, trailing space characters +// at the end of src are preserved and copied to dst. 
+// For example, if src has no trailing spaces, neither will dst; +// if src ends in a trailing newline, so will dst. +func Indent(dst *bytes.Buffer, src []byte, prefix, indent string) error { + return encoder.Indent(dst, src, prefix, indent) +} + +// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029 +// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029 +// so that the JSON will be safe to embed inside HTML
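
For reference, the exported API vendored above is a drop-in replacement for encoding/json, so the struct-tag semantics described in the Marshal documentation apply unchanged when this package is used elsewhere in the codebase. A minimal, illustrative usage sketch follows (not part of the vendored sources; the Entry type and values are hypothetical):

package main

import (
	"fmt"

	json "github.com/goccy/go-json"
)

// Entry exercises the documented tag options: key renaming,
// "omitempty", the "string" option, and "-" for always-omitted fields.
type Entry struct {
	Name  string `json:"name"`            // appears as key "name"
	Count int    `json:"count,omitempty"` // omitted when zero
	ID    int64  `json:"id,string"`       // encoded inside a JSON string
	Skip  string `json:"-"`               // never encoded
}

func main() {
	b, err := json.Marshal(Entry{Name: "loki", ID: 42, Skip: "hidden"})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"name":"loki","id":"42"}

	var e Entry
	if err := json.Unmarshal(b, &e); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", e) // {Name:loki Count:0 ID:42 Skip:}
}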